repo_name stringlengths 5–100 | ref stringlengths 12–67 | path stringlengths 4–244 | copies stringlengths 1–8 | content stringlengths 0–1.05M ⌀ |
---|---|---|---|---|
xindus40223115/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/http/__init__.py
|
1383
|
# This directory is a Python package.
|
FireBladeNooT/Medusa_1_6
|
refs/heads/master
|
lib/requests/packages/urllib3/contrib/ntlmpool.py
|
514
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
self.num_connections, self.host, self.authurl)
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', reshdr)
log.debug('Response data: %s [...]', res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', dict(res.getheaders()))
log.debug('Response data: %s [...]', res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
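# Hedged usage sketch (added for illustration; not part of the original
# module). The host, port, credentials, and authurl below are assumptions
# for some NTLM-protected endpoint and must be replaced with real values.
if __name__ == '__main__':
    pool = NTLMConnectionPool(user='EXAMPLE\\alice', pw='secret',
                              authurl='/auth',
                              host='intranet.example.com', port=443)
    # The NTLM handshake happens lazily when the pool opens its first
    # connection; urlopen then reuses that authenticated socket.
    response = pool.urlopen('GET', '/auth')
    log.debug('Example request returned status %s', response.status)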
|
o3project/odenos
|
refs/heads/develop
|
src/test/python/org/o3project/odenos/remoteobject/manager/system/test_component_connection.py
|
6
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.remoteobject.manager.system.component_connection\
import ComponentConnection
import unittest
class ComponentConnectionTest(unittest.TestCase):
def setUp(self):
self.target = ComponentConnection("slicer1->network1",
"original",
"running")
def tearDown(self):
self.target = None
def test_constructor_state_running(self):
self.assertEqual(self.target._property[self.target.OBJECT_ID],
"slicer1->network1")
self.assertEqual(self.target._property[self.target.OBJECT_TYPE],
"ComponentConnection")
self.assertEqual(self.target._property[self.target.CONNECTION_TYPE],
"original")
self.assertEqual(self.target._property[self.target.OBJECT_STATE],
"running")
def test_constructor_Not_state(self):
self.target = ComponentConnection("slicer1->network1",
"original",
None)
self.assertEqual(self.target._property[self.target.OBJECT_ID],
"slicer1->network1")
self.assertEqual(self.target._property[self.target.OBJECT_TYPE],
"ComponentConnection")
self.assertEqual(self.target._property[self.target.CONNECTION_TYPE],
"original")
self.assertEqual(self.target._property[self.target.OBJECT_STATE],
"initializing")
def test_id(self):
self.assertEqual(self.target.id, "slicer1->network1")
def test_type(self):
self.assertEqual(self.target.type, "ComponentConnection")
def test_connection_type(self):
self.assertEqual(self.target.connection_type, "original")
def test_state(self):
self.assertEqual(self.target.state, "running")
def test_state_setter(self):
self.target.state = "finalizing"
self.assertEqual(self.target._property[self.target.OBJECT_STATE],
"finalizing")
def test_is_read_only_key_id(self):
self.assertEqual(self.target._is_read_only_key("id"), True)
def test_is_read_only_key_type(self):
self.assertEqual(self.target._is_read_only_key("type"), True)
def test_is_read_only_key_connection_type(self):
self.assertEqual(
self.target._is_read_only_key("connection_type"), True)
def test_is_read_only_key_state(self):
self.assertEqual(self.target._is_read_only_key("state"), False)
def test_get_property_id(self):
self.assertEqual(self.target.get_property("id"),
"slicer1->network1")
def test_get_property_type(self):
self.assertEqual(self.target.get_property("type"),
"ComponentConnection")
def test_get_property_connection_type(self):
self.assertEqual(self.target.get_property("connection_type"),
"original")
def test_get_property_state(self):
self.assertEqual(self.target.get_property("state"),
"running")
def test_set_property_state(self):
self.target.set_property("state", "error")
self.assertEqual(self.target._property["state"],
"error")
def test_set_property_read_only_key(self):
self.target.set_property("id", "slicer1")
self.assertEqual(self.target._property["id"],
"slicer1->network1")
def test_set_property_Same_Old(self):
self.target.set_property("state", "running")
self.assertEqual(self.target._property["state"],
"running")
def test_get_property_keys(self):
self.assertEqual(self.target.get_property_keys(),
self.target._property.keys())
def test_create_from_packed(self):
self.value = {"id": "slicer1->network1",
"type": "ComponentConnection",
"connection_type": "original",
"state": "initializing"}
self.result = ComponentConnection.create_from_packed(self.value)
self.assertEqual(self.result._property[self.target.OBJECT_ID],
"slicer1->network1")
self.assertEqual(self.result._property[self.target.OBJECT_TYPE],
"ComponentConnection")
self.assertEqual(self.result._property[self.target.CONNECTION_TYPE],
"original")
self.assertEqual(self.result._property[self.target.OBJECT_STATE],
"initializing")
def test_create_from_packed_State_None(self):
self.value = {"id": "slicer1->network1",
"type": "ComponentConnection",
"connection_type": "original"}
self.result = ComponentConnection.create_from_packed(self.value)
self.assertEqual(self.result._property[self.target.OBJECT_ID],
"slicer1->network1")
self.assertEqual(self.result._property[self.target.OBJECT_TYPE],
"ComponentConnection")
self.assertEqual(self.result._property[self.target.CONNECTION_TYPE],
"original")
self.assertEqual(self.result._property[self.target.OBJECT_STATE],
"initializing")
def test_packed_object(self):
self.result = self.target.packed_object()
self.assertEqual(self.result[self.target.OBJECT_ID],
"slicer1->network1")
self.assertEqual(self.result[self.target.OBJECT_TYPE],
"ComponentConnection")
self.assertEqual(self.result[self.target.CONNECTION_TYPE],
"original")
self.assertEqual(self.result[self.target.OBJECT_STATE],
"running")
if __name__ == '__main__':
unittest.main()
|
andhit-r/opnsynid-accounting-report
|
refs/heads/8.0
|
opnsynid_accounting_report_configuration_page/__init__.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2015 Andhitia Rama. All rights reserved.
# @author Andhitia Rama
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
adambrenecki/django
|
refs/heads/master
|
tests/model_regress/models.py
|
134
|
# coding: utf-8
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
CHOICES = (
(1, 'first'),
(2, 'second'),
)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
status = models.IntegerField(blank=True, null=True, choices=CHOICES)
misc_data = models.CharField(max_length=100, blank=True)
article_text = models.TextField()
class Meta:
ordering = ('pub_date', 'headline')
# A utf-8 verbose name (Ångström's Articles) to test they are valid.
verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"
def __str__(self):
return self.headline
class Movie(models.Model):
#5218: Test models with non-default primary keys / AutoFields
movie_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
class Party(models.Model):
when = models.DateField(null=True)
class Event(models.Model):
when = models.DateTimeField()
@python_2_unicode_compatible
class Department(models.Model):
id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Worker(models.Model):
department = models.ForeignKey(Department)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class BrokenUnicodeMethod(models.Model):
name = models.CharField(max_length=7)
def __str__(self):
# Intentionally broken (invalid start byte in byte string).
return b'Name\xff: %s'.decode() % self.name
class NonAutoPK(models.Model):
name = models.CharField(max_length=10, primary_key=True)
#18432: Chained foreign keys with to_field produce incorrect query
class Model1(models.Model):
pkey = models.IntegerField(unique=True, db_index=True)
class Model2(models.Model):
model1 = models.ForeignKey(Model1, unique=True, to_field='pkey')
class Model3(models.Model):
model2 = models.ForeignKey(Model2, unique=True, to_field='model1')
|
wackymaster/QTClock
|
refs/heads/master
|
Libraries/numpy/distutils/fcompiler/pg.py
|
167
|
# http://www.pgroup.com
from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
from sys import platform
compilers = ['PGroupFCompiler']
class PGroupFCompiler(FCompiler):
compiler_type = 'pg'
description = 'Portland Group Fortran Compiler'
version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*'
if platform == 'darwin':
executables = {
'version_cmd' : ["<F77>", "-V"],
'compiler_f77' : ["pgfortran", "-dynamiclib"],
'compiler_fix' : ["pgfortran", "-Mfixed", "-dynamiclib"],
'compiler_f90' : ["pgfortran", "-dynamiclib"],
'linker_so' : ["libtool"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['']
else:
executables = {
'version_cmd' : ["<F77>", "-V"],
'compiler_f77' : ["pgfortran"],
'compiler_fix' : ["pgfortran", "-Mfixed"],
'compiler_f90' : ["pgfortran"],
'linker_so' : ["pgfortran", "-shared", "-fpic"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fpic']
module_dir_switch = '-module '
module_include_switch = '-I'
def get_flags(self):
opt = ['-Minform=inform', '-Mnosecond_underscore']
return self.pic_flags + opt
def get_flags_opt(self):
return ['-fast']
def get_flags_debug(self):
return ['-g']
if platform == 'darwin':
def get_flags_linker_so(self):
return ["-dynamic", '-undefined', 'dynamic_lookup']
def runtime_library_dir_option(self, dir):
return '-R"%s"' % dir
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='pg')
compiler.customize()
print(compiler.get_version())
|
zstackorg/zstack-woodpecker
|
refs/heads/master
|
integrationtest/vm/virtualrouter/ipsec/test_create_ipsec_resource_stack.py
|
2
|
#coding:utf-8
'''
New Integration Test for zstack cloudformation.
Create an IPsec connection via a resource stack.
@author: chenyuan.xu
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_stack as resource_stack_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.ipsec_operations as ipsec_ops
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import os
def test():
test_stub = test_lib.lib_get_test_stub()
test_obj_dict1 = test_state.TestStateDict()
test_obj_dict2 = test_state.TestStateDict()
global mevoco1_ip
global mevoco2_ip
global ipsec1
global ipsec2
global templateContent
mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
mevoco2_ip = os.environ['secondZStackMnIp']
test_util.test_dsc('Create test vip in mevoco1')
cond = res_ops.gen_query_conditions("category", '=', "Public")
l3_pub1_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
cond = res_ops.gen_query_conditions("name", '=', os.environ.get('l3VlanNetworkName1'))
l3_pri1_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
vip1 = test_stub.create_vip('ipsec1_vip', l3_pub1_queried[0].uuid)
cond = res_ops.gen_query_conditions('uuid', '=', l3_pri1_queried[0].uuid)
first_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
test_util.test_dsc('Create test vip in mevoco2')
cond = res_ops.gen_query_conditions("category", '=', "Public")
l3_pub2_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
cond = res_ops.gen_query_conditions("name", '=', os.environ.get('l3VlanDNATNetworkName'))
l3_pri2_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))
vip2 = test_stub.create_vip('ipsec2_vip', l3_pub2_queried[0].uuid)
cond = res_ops.gen_query_conditions('uuid', '=', l3_pri2_queried[0].uuid)
second_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
templateContent = '''
{
"ZStackTemplateFormatVersion": "2018-06-18",
"Description": "本示例会创建一个简单的IPsec通道,需要用户提供下面正确的数据\n已有的虚拟IP地址,\n本地子网Uuid,远端IP,远端CIDR,认证密钥",
"Parameters": {
"VipUuid":{
"Type": "String",
"Label": "虚拟IP",
"Description":"已有的虚拟IP的Uuid"
},
"PrivateNetworkUuid":{
"Type": "String",
"Label": "本地网络",
"Description":"本地网络Uuid"
},
"PeerAddress": {
"Type": "String",
"Description":"远端IP"
},
"PeerCidrs":{
"Type": "CommaDelimitedList",
"Description":"远端 Cidr"
},
"AuthKey":{
"Type": "String",
"DefaultValue":"Test1234"
}
},
"Resources": {
"IPsecConnection":{
"Type": "ZStack::Resource::IPsecConnection",
"Properties": {
"name": "IPsec-STACK",
"vipUuid": {"Ref": "VipUuid"},
"l3NetworkUuid": {"Ref":"PrivateNetworkUuid"},
"peerAddress": {"Ref":"PeerAddress"},
"peerCidrs": {"Ref":"PeerCidrs"},
"authKey": {"Ref":"AuthKey"}
}
}
},
"Outputs": {
"IPsecConnection": {
"Value": {
"Ref": "IPsecConnection"
}
}
}
}
'''
#1.create resource stack
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
test_util.test_dsc('Create ipsec in mevoco1')
resource_stack1_option = test_util.ResourceStackOption()
resource_stack1_option.set_name("Create_STACK-IPSEC1")
resource_stack1_option.set_rollback("true")
print ('aooo = %s is %s') % ([second_zstack_cidrs], type([second_zstack_cidrs]))
parameter1 = '{"VipUuid":"%s","PrivateNetworkUuid":"%s","PeerAddress":"%s","PeerCidrs":"%s"}' % (vip1.get_vip().uuid, l3_pri1_queried[0].uuid, vip2.get_vip().ip, second_zstack_cidrs)
resource_stack1_option.set_templateContent(templateContent)
resource_stack1_option.set_parameters(parameter1)
preview_resource_stack1 = resource_stack_ops.preview_resource_stack(resource_stack1_option)
resource_stack1 = resource_stack_ops.create_resource_stack(resource_stack1_option)
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
test_util.test_dsc('Create ipsec in mevoco2')
resource_stack2_option = test_util.ResourceStackOption()
resource_stack2_option.set_name("Create_STACK-IPSEC2")
resource_stack2_option.set_rollback("true")
parameter2 = '{"VipUuid":"%s","PrivateNetworkUuid":"%s","PeerAddress":"%s","PeerCidrs":"%s"}' % (vip2.get_vip().uuid, l3_pri2_queried[0].uuid, vip1.get_vip().ip, first_zstack_cidrs)
resource_stack2_option.set_templateContent(templateContent)
resource_stack2_option.set_parameters(parameter2)
preview_resource_stack2 = resource_stack_ops.preview_resource_stack(resource_stack2_option)
resource_stack2 = resource_stack_ops.create_resource_stack(resource_stack2_option)
#2.query resource stack
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
test_util.test_dsc('Query resource stack in mevoco1')
cond = res_ops.gen_query_conditions('uuid', '=', resource_stack1.uuid)
resource_stack1_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
cond = res_ops.gen_query_conditions('name', '=', 'IPsec-STACK')
ipsec1_queried = res_ops.query_resource(res_ops.IPSEC_CONNECTION, cond)
if len(resource_stack1_queried) == 0:
test_util.test_fail("Fail to query resource stack")
if resource_stack1_queried[0].status == 'Created':
if len(ipsec1_queried) == 0:
test_util.test_fail("Fail to create ipsec connection when resource stack status is Created")
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
test_util.test_dsc('Query resource stack in mevoco2')
cond = res_ops.gen_query_conditions('uuid', '=', resource_stack2.uuid)
resource_stack2_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
cond = res_ops.gen_query_conditions('name', '=', 'IPsec-STACK')
ipsec2_queried = res_ops.query_resource(res_ops.IPSEC_CONNECTION, cond)
if len(resource_stack2_queried) == 0:
test_util.test_fail("Fail to query resource stack")
if resource_stack2_queried[0].status == 'Created':
if len(ipsec2_queried) == 0 :
test_util.test_fail("Fail to create ipsec connection when resource stack status is Created")
#3.get resource from resource stack
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
test_util.test_dsc('Get resource from resource stack in mevoco1')
resource1 = resource_stack_ops.get_resource_from_resource_stack(resource_stack1.uuid)
if resource1 == None or len(resource1) != 1:
test_util.test_fail("Fail to get resource from resource_stack")
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
test_util.test_dsc('Get resource from resource stack in mevoco2')
resource2 = resource_stack_ops.get_resource_from_resource_stack(resource_stack2.uuid)
if resource2 == None or len(resource2) != 1:
test_util.test_fail("Fail to get resource from resource_stack")
#4.query event from resource stack
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
test_util.test_dsc('Query event from resource stack in mevoco1')
cond = res_ops.gen_query_conditions('stackUuid', '=', resource_stack1.uuid)
event1 = res_ops.query_event_from_resource_stack(cond)
if event1 == None or len(event1) != 2:
test_util.test_fail("Fail to get event from resource_stack")
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
test_util.test_dsc('Query event from resource stack in mevoco2')
cond = res_ops.gen_query_conditions('stackUuid', '=', resource_stack2.uuid)
event2 = res_ops.query_event_from_resource_stack(cond)
if event2 == None or len(event2) != 2:
test_util.test_fail("Fail to get event from resource_stack")
#5.delete resource stack
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
test_util.test_dsc('Delete resource stack in mevoco1')
resource_stack_ops.delete_resource_stack(resource_stack1.uuid)
cond = res_ops.gen_query_conditions('uuid', '=', resource_stack1.uuid)
resource_stack1_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
cond = res_ops.gen_query_conditions('name', '=', 'IPsec-STACK')
ipsec1_queried = res_ops.query_resource(res_ops.IPSEC_CONNECTION, cond)
if len(resource_stack1_queried) != 0:
test_util.test_fail("Fail to delete resource stack")
elif len(ipsec1_queried) != 0:
test_util.test_fail("Fail to delete ipsec connection when resource stack is deleted")
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
test_util.test_dsc('Delete resource stack in mevoco2')
resource_stack_ops.delete_resource_stack(resource_stack2.uuid)
cond = res_ops.gen_query_conditions('uuid', '=', resource_stack2.uuid)
resource_stack2_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
cond = res_ops.gen_query_conditions('name', '=', 'IPsec-STACK')
ipsec2_queried = res_ops.query_resource(res_ops.IPSEC_CONNECTION, cond)
if len(resource_stack2_queried) != 0:
test_util.test_fail("Fail to delete resource stack")
elif len(ipsec2_queried) != 0:
test_util.test_fail("Fail to delete ipsec connection when resource stack is deleted")
test_util.test_pass('Create IPsec Resource Stack Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
print "Ignore cleanup"
|
ST-Data-Mining/crater
|
refs/heads/master
|
wei/tuner.py
|
1
|
from __future__ import division
import random, pdb
from main import *
from base import *
import collections
# from file import *
from start import *
class DeBase(object):
def __init__(i):
global The
i.np = Settings.de.np
i.fa = Settings.de.f
i.cr = Settings.de.cr
i.repeats = Settings.de.repeats
i.life = Settings.de.life
# i.obj = The.option.tunedobjective
i.obj = -1 ### need to change this to the above line after done!
i.evaluation = 0
i.candidates = []
i.scores = {}
i.frontier = [i.generate() for _ in xrange(i.np)]
i.evaluate()
i.bestconf, i.bestscore=i.best()
def generate(i):
raise NotImplementedError(" error")
def evaluate(i):
raise NotImplementedError(" error")
def assign(i, tobetuned, tunedvalue):
keys = tobetuned.keys()
for key,val in zip(keys, tunedvalue):
exec(key +"= "+str(val))
tobetuned[key] =val
def best(i):
sortlst = [], [], None
if i.obj == 1: # this is for pf
sortlst = sorted(i.scores.items(), key=lambda x: x[1][i.obj], reverse=True)  # a list of tuples
else:
sortlst = sorted(i.scores.items(), key=lambda x: x[1][i.obj])  # a list of tuples
bestconf = i.frontier[sortlst[-1][0]] #[(0, [100, 73, 9, 42]), (1, [75, 41, 12, 66])]
bestscore = sortlst[-1][-1][i.obj]
# pdb.set_trace()
return bestconf, bestscore
def allModel(i): raise NotImplementedError(" error")
def gen3(i, n, f):
seen = [n]
def gen1(seen):
while 1:
k = random.randint(0, i.np - 1)
if k not in seen:
seen += [k]
break
return i.frontier[k]
a = gen1(seen)
b = gen1(seen)
c = gen1(seen)
return a, b, c
def update(i, n, old):
newf = []
a, b, c = i.gen3(n, old)
for k in xrange(len(old)):
if isinstance(old[k], bool):
newf.append(old[k] if i.cr < random.random() else not old[k])
else:
newf.append(old[k] if i.cr < random.random() else i.trim(k,(a[k] + i.fa * (b[k] - c[k]))))
return i.treat(newf)
# def trim(i, x): return max(Settings.de.rfLimit_Min[i], min(int(x),Settings.de.rfLimit_Max[i]))
def DE(i):
changed = False
def isBetter(new, old): return new < old if i.obj == 1 else new >old
for k in xrange(i.repeats):
if i.life <= 0:
break
nextgeneration = []
for n, f in enumerate(i.frontier):
new = i.update(n,f)
i.assign(i.tobetuned,new)
newscore = i.callModel()
i.evaluation +=1
if isBetter(newscore[i.obj], i.scores[n][i.obj]):
nextgeneration.append(new)
i.scores[n] = newscore[:]
changed = True
else:
nextgeneration.append(f)
i.frontier = nextgeneration[:]
newbestconf, newbestscore = i.best()
if isBetter(newbestscore, i.bestscore):
print "newbestscore %s:" % str(newbestscore)
print "bestconf %s :" % str(newbestconf)
i.bestscore = newbestscore
i.bestconf = newbestconf[:]
if not changed:
i.life -=1
changed = False
i.assign(i.tobetuned,i.bestconf)
print "DONE !!!!"
class Where(DeBase):
def __init__(i):
i.tobetuned = collections.OrderedDict((
("The.tree.infoPrune" ,The.tree.infoPrune),
("The.tree.min",The.tree.min),
("The.option.threshold",The.option.threshold),
("The.where.wriggle",The.where.wriggle),
("The.where.depthMax",The.where.depthMax),
("The.where.depthMin",The.where.depthMin),
("The.option.minSize",The.option.minSize),
("The.tree.prune",The.tree.prune),
("The.where.prune",The.where.prune)))
i.limit = Settings.de.limit
super(Where,i).__init__()
def genFloat(i,l): return round(random.uniform(0.01,l) ,2)
def genInt(i,l): return int(random.uniform(1,l))
def genBool(i): return random.random() <= 0.5
def generate(i):
i.candidates = [i.genFloat(l) if k not in [1,4,5] else\
i.genInt(l) for k,l in enumerate(i.limit)]
i.candidates.extend([i.genBool() for _ in range(2)]) # 1: treePrune, 2:whereprune
return i.treat(i.candidates)
def treat(i, lst):
if lst[-1] and lst[4] <= lst[5]:
lst[4] = i.genInt(4)
lst[5] = i.genInt(5)
lst = i.treat(lst)
return lst
def callModel(i): return main()[-1]
def evaluate(i):
The.data.train =["./data/ivy/ivy-1.1.csv"]
The.data.predict ="./data/ivy/ivy-1.4.csv"
for n, arglst in enumerate(i.frontier):
i.assign(i.tobetuned,arglst)
i.scores[n] = i.callModel()  # main returns [[pd,pf,prec,f,g],[pd,pf,prec,f,g]] for N-defective and Y-defective
print i.scores
def trim(i, n,x):
if n in [1,4,5]:
return max(1, min(int(x),i.limit[n]))
else:
return max(0.01, min(round(x,2), i.limit[n]))
#test
Where().DE()
|
StoDevX/cs251-toolkit
|
refs/heads/master
|
cs251tk/toolkit/test_args.py
|
1
|
import datetime
from .args import (
build_argparser,
get_students_from_args,
get_assignments_from_args,
compute_stogit_url,
)
def args(arglist):
return vars(build_argparser().parse_args(args=arglist))
students = {
'my': ['rives'],
'section-a': ['student-a'],
'section-b': ['student-b'],
}
def test_all():
# check that --all includes all students
assert get_students_from_args(**args(['--all']), _all_students=students) == students['my'] + students['section-a'] + students['section-b']
def test_students():
# multiple sets of --students should wind up as one flattened list
assert get_students_from_args(**args(['--students', 'a', 'b', '--students', 'c']), _all_students=students) == ['a', 'b', 'c']
# it should return a sorted list of student names
assert get_students_from_args(**args(['--students', 'c', 'b', '--students', 'a']), _all_students=students) == ['a', 'b', 'c']
# multiple occurrences of the same student should be removed
assert get_students_from_args(**args(['--students', 'a', 'a', '--students', 'a']), _all_students=students) == ['a']
# if no students are given, it should default to the "my" section
assert get_students_from_args(**args([]), _all_students=students) == students['my']
def test_section():
# "--section $name" should return the students for that section
assert get_students_from_args(**args(['--section', 'a']), _all_students=students) == students['section-a']
def test_record():
assert get_assignments_from_args(**args(['--record', 'hw4'])) == ['hw4']
def test_stogit_url_computation():
assert compute_stogit_url(stogit=None, course='sd', _now=datetime.date(2017, 1, 31)) \
== '[email protected]:sd-s17'
assert compute_stogit_url(stogit=None, course='sd', _now=datetime.date(2016, 9, 15)) \
== '[email protected]:sd-f16'
assert compute_stogit_url(stogit=None, course='sd', _now=datetime.date(2016, 4, 15)) \
== '[email protected]:sd-s16'
assert compute_stogit_url(stogit='blah', course='sd', _now=datetime.date.today()) \
== 'blah'
assert compute_stogit_url(stogit=None, course='hd', _now=datetime.date(2016, 4, 15)) \
== '[email protected]:hd-s16'
|
Mixser/django
|
refs/heads/master
|
django/db/backends/mysql/introspection.py
|
363
|
from collections import namedtuple
from MySQLdb.constants import FIELD_TYPE
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.datastructures import OrderedSet
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('extra', 'default'))
InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default')
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = {
FIELD_TYPE.BLOB: 'TextField',
FIELD_TYPE.CHAR: 'CharField',
FIELD_TYPE.DECIMAL: 'DecimalField',
FIELD_TYPE.NEWDECIMAL: 'DecimalField',
FIELD_TYPE.DATE: 'DateField',
FIELD_TYPE.DATETIME: 'DateTimeField',
FIELD_TYPE.DOUBLE: 'FloatField',
FIELD_TYPE.FLOAT: 'FloatField',
FIELD_TYPE.INT24: 'IntegerField',
FIELD_TYPE.LONG: 'IntegerField',
FIELD_TYPE.LONGLONG: 'BigIntegerField',
FIELD_TYPE.SHORT: 'SmallIntegerField',
FIELD_TYPE.STRING: 'CharField',
FIELD_TYPE.TIME: 'TimeField',
FIELD_TYPE.TIMESTAMP: 'DateTimeField',
FIELD_TYPE.TINY: 'IntegerField',
FIELD_TYPE.TINY_BLOB: 'TextField',
FIELD_TYPE.MEDIUM_BLOB: 'TextField',
FIELD_TYPE.LONG_BLOB: 'TextField',
FIELD_TYPE.VAR_STRING: 'CharField',
}
def get_field_type(self, data_type, description):
field_type = super(DatabaseIntrospection, self).get_field_type(data_type, description)
if field_type == 'IntegerField' and 'auto_increment' in description.extra:
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("SHOW FULL TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Returns a description of the table, with the DB-API cursor.description interface.
"""
# information_schema database gives more accurate results for some figures:
# - varchar length returned by cursor.description is an internal length,
# not visible length (#5725)
# - precision and scale (for decimal fields) (#5014)
# - auto_increment is not available in cursor.description
cursor.execute("""
SELECT column_name, data_type, character_maximum_length, numeric_precision,
numeric_scale, extra, column_default
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()""", [table_name])
field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
to_int = lambda i: int(i) if i is not None else i
fields = []
for line in cursor.description:
col_name = force_text(line[0])
fields.append(
FieldInfo(*((col_name,)
+ line[1:3]
+ (to_int(field_info[col_name].max_len) or line[3],
to_int(field_info[col_name].num_prec) or line[4],
to_int(field_info[col_name].num_scale) or line[5])
+ (line[6],)
+ (field_info[col_name].extra,)
+ (field_info[col_name].column_default,)))
)
return fields
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
constraints = self.get_key_columns(cursor, table_name)
relations = {}
for my_fieldname, other_table, other_field in constraints:
relations[my_fieldname] = (other_field, other_table)
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
cursor.execute("""
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
# Do a two-pass search for indexes: on first pass check which indexes
# are multicolumn, on second pass check which single-column indexes
# are present.
rows = list(cursor.fetchall())
multicol_indexes = set()
for row in rows:
if row[3] > 1:
multicol_indexes.add(row[2])
indexes = {}
for row in rows:
if row[2] in multicol_indexes:
continue
if row[4] not in indexes:
indexes[row[4]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[2] == 'PRIMARY':
indexes[row[4]]['primary_key'] = True
if not row[1]:
indexes[row[4]]['unique'] = True
return indexes
def get_storage_engine(self, cursor, table_name):
"""
Retrieves the storage engine for a given table. Returns the default
storage engine if the table doesn't exist.
"""
cursor.execute(
"SELECT engine "
"FROM information_schema.tables "
"WHERE table_name = %s", [table_name])
result = cursor.fetchone()
if not result:
return self.connection.features._mysql_storage_engine
return result[0]
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the actual constraint names and columns
name_query = """
SELECT kc.`constraint_name`, kc.`column_name`,
kc.`referenced_table_name`, kc.`referenced_column_name`
FROM information_schema.key_column_usage AS kc
WHERE
kc.table_schema = %s AND
kc.table_name = %s
"""
cursor.execute(name_query, [self.connection.settings_dict['NAME'], table_name])
for constraint, column, ref_table, ref_column in cursor.fetchall():
if constraint not in constraints:
constraints[constraint] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': False,
'check': False,
'foreign_key': (ref_table, ref_column) if ref_column else None,
}
constraints[constraint]['columns'].add(column)
# Now get the constraint types
type_query = """
SELECT c.constraint_name, c.constraint_type
FROM information_schema.table_constraints AS c
WHERE
c.table_schema = %s AND
c.table_name = %s
"""
cursor.execute(type_query, [self.connection.settings_dict['NAME'], table_name])
for constraint, kind in cursor.fetchall():
if kind.lower() == "primary key":
constraints[constraint]['primary_key'] = True
constraints[constraint]['unique'] = True
elif kind.lower() == "unique":
constraints[constraint]['unique'] = True
# Now add in the indexes
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
for table, non_unique, index, colseq, column in [x[:5] for x in cursor.fetchall()]:
if index not in constraints:
constraints[index] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': True,
'check': False,
'foreign_key': None,
}
constraints[index]['index'] = True
constraints[index]['columns'].add(column)
# Convert the sorted sets to lists
for constraint in constraints.values():
constraint['columns'] = list(constraint['columns'])
return constraints
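# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes a configured Django MySQL database connection; the
# table name 'myapp_book' is hypothetical.
#
#     from django.db import connection
#     introspection = connection.introspection
#     with connection.cursor() as cursor:
#         print(introspection.get_table_list(cursor))
#         print(introspection.get_relations(cursor, 'myapp_book'))
#         print(introspection.get_constraints(cursor, 'myapp_book'))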
|
ingadhoc/odoo-addons
|
refs/heads/8.0
|
project_related_projects/report/__init__.py
|
34
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ros-industrial/robotiq
|
refs/heads/kinetic-devel
|
robotiq_2f_gripper_control/nodes/Robotiq2FGripperSimpleController.py
|
1
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Robotiq, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Robotiq, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2012, Robotiq, Inc.
# Revision $Id$
"""@package docstring
Command-line interface for sending simple commands to a ROS node controlling a 2F gripper.
This serves as an example for publishing messages on the 'Robotiq2FGripperRobotOutput' topic using the 'Robotiq2FGripper_robot_output' msg type for sending commands to a 2F gripper.
"""
import roslib; roslib.load_manifest('robotiq_2f_gripper_control')
import rospy
from robotiq_2f_gripper_control.msg import _Robotiq2FGripper_robot_output as outputMsg
from time import sleep
def genCommand(char, command):
"""Update the command according to the character entered by the user."""
if char == 'a':
command = outputMsg.Robotiq2FGripper_robot_output();
command.rACT = 1
command.rGTO = 1
command.rSP = 255
command.rFR = 150
if char == 'r':
command = outputMsg.Robotiq2FGripper_robot_output();
command.rACT = 0
if char == 'c':
command.rPR = 255
if char == 'o':
command.rPR = 0
# If the command entered is an int, assign this value to rPRA
try:
command.rPR = int(char)
if command.rPR > 255:
command.rPR = 255
if command.rPR < 0:
command.rPR = 0
except ValueError:
pass
if char == 'f':
command.rSP += 25
if command.rSP > 255:
command.rSP = 255
if char == 'l':
command.rSP -= 25
if command.rSP < 0:
command.rSP = 0
if char == 'i':
command.rFR += 25
if command.rFR > 255:
command.rFR = 255
if char == 'd':
command.rFR -= 25
if command.rFR < 0:
command.rFR = 0
return command
def askForCommand(command):
"""Ask the user for a command to send to the gripper."""
currentCommand = 'Simple 2F Gripper Controller\n-----\nCurrent command:'
currentCommand += ' rACT = ' + str(command.rACT)
currentCommand += ', rGTO = ' + str(command.rGTO)
currentCommand += ', rATR = ' + str(command.rATR)
currentCommand += ', rPR = ' + str(command.rPR )
currentCommand += ', rSP = ' + str(command.rSP )
currentCommand += ', rFR = ' + str(command.rFR )
print currentCommand
strAskForCommand = '-----\nAvailable commands\n\n'
strAskForCommand += 'r: Reset\n'
strAskForCommand += 'a: Activate\n'
strAskForCommand += 'c: Close\n'
strAskForCommand += 'o: Open\n'
strAskForCommand += '(0-255): Go to that position\n'
strAskForCommand += 'f: Faster\n'
strAskForCommand += 'l: Slower\n'
strAskForCommand += 'i: Increase force\n'
strAskForCommand += 'd: Decrease force\n'
strAskForCommand += '-->'
return raw_input(strAskForCommand)
def publisher():
"""Main loop which requests new commands and publish them on the Robotiq2FGripperRobotOutput topic."""
rospy.init_node('Robotiq2FGripperSimpleController')
pub = rospy.Publisher('Robotiq2FGripperRobotOutput', outputMsg.Robotiq2FGripper_robot_output)
command = outputMsg.Robotiq2FGripper_robot_output();
while not rospy.is_shutdown():
command = genCommand(askForCommand(command), command)
pub.publish(command)
rospy.sleep(0.1)
if __name__ == '__main__':
publisher()
|
kinverarity1/bruges
|
refs/heads/master
|
docs/conf.py
|
3
|
# -*- coding: utf-8 -*-
#
# bruges documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 12 16:14:28 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bruges'
copyright = u'2015, Agile Geoscience'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'brugesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'bruges.tex', u'bruges Documentation',
u'Evan Bianco, Ben Bougher, Matt Hall', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bruges', u'bruges Documentation',
[u'Evan Bianco, Ben Bougher, Matt Hall'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bruges', u'bruges Documentation',
u'Evan Bianco, Ben Bougher, Matt Hall', 'bruges', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
aldian/tensorflow
|
refs/heads/master
|
tensorflow/tools/graph_transforms/python/transform_graph_test.py
|
169
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StatSummarizer Python wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import test
from tensorflow.tools.graph_transforms import TransformGraph
class TransformGraphTest(test.TestCase):
# This test constructs a graph with a relu op that's not used by the normal
# inference path, and then tests that the strip_unused transform removes it as
# expected.
def testTransformGraph(self):
input_graph_def = graph_pb2.GraphDef()
const_op1 = input_graph_def.node.add()
const_op1.op = "Const"
const_op1.name = "const_op1"
const_op1.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op1.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[1, 2], dtypes.float32, [1, 2])))
const_op2 = input_graph_def.node.add()
const_op2.op = "Const"
const_op2.name = "const_op2"
const_op2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op2.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[3, 4], dtypes.float32, [1, 2])))
# Create an add that has two constants as inputs.
add_op = input_graph_def.node.add()
add_op.op = "Add"
add_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
add_op.name = "add_op"
add_op.input.extend(["const_op1", "const_op2"])
# Create a relu that reads from the add.
relu_op = input_graph_def.node.add()
relu_op.op = "Relu"
relu_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
relu_op.name = "relu_op"
relu_op.input.extend(["add_op"])
# We're specifying that add_op is the final output, and so the relu isn't
# needed.
input_names = []
output_names = ["add_op"]
transforms = ["strip_unused_nodes"]
transformed_graph_def = TransformGraph(input_graph_def, input_names,
output_names, transforms)
# We expect that the relu is no longer present after running the transform.
for node in transformed_graph_def.node:
self.assertNotEqual("Relu", node.op)
if __name__ == "__main__":
test.main()
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/lib/tests/test_polynomial.py
|
32
|
from __future__ import division, absolute_import, print_function
'''
>>> p = np.poly1d([1.,2,3])
>>> p
poly1d([ 1., 2., 3.])
>>> print(p)
2
1 x + 2 x + 3
>>> q = np.poly1d([3.,2,1])
>>> q
poly1d([ 3., 2., 1.])
>>> print(q)
2
3 x + 2 x + 1
>>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j]))
3 2
(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)
>>> print(np.poly1d([-3, -2, -1]))
2
-3 x - 2 x - 1
>>> p(0)
3.0
>>> p(5)
38.0
>>> q(0)
1.0
>>> q(5)
86.0
>>> p * q
poly1d([ 3., 8., 14., 8., 3.])
>>> p / q
(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667]))
>>> p + q
poly1d([ 4., 4., 4.])
>>> p - q
poly1d([-2., 0., 2.])
>>> p ** 4
poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
>>> p(q)
poly1d([ 9., 12., 16., 8., 6.])
>>> q(p)
poly1d([ 3., 12., 32., 40., 34.])
>>> np.asarray(p)
array([ 1., 2., 3.])
>>> len(p)
2
>>> p[0], p[1], p[2], p[3]
(3.0, 2.0, 1.0, 0)
>>> p.integ()
poly1d([ 0.33333333, 1. , 3. , 0. ])
>>> p.integ(1)
poly1d([ 0.33333333, 1. , 3. , 0. ])
>>> p.integ(5)
poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. ,
0. , 0. , 0. ])
>>> p.deriv()
poly1d([ 2., 2.])
>>> p.deriv(2)
poly1d([ 2.])
>>> q = np.poly1d([1.,2,3], variable='y')
>>> print(q)
2
1 y + 2 y + 3
>>> q = np.poly1d([1.,2,3], variable='lambda')
>>> print(q)
2
1 lambda + 2 lambda + 3
>>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
(poly1d([ 1., -1.]), poly1d([ 0.]))
'''
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
assert_almost_equal, rundocs
)
class TestDocs(TestCase):
def test_doctests(self):
return rundocs()
def test_roots(self):
assert_array_equal(np.roots([1, 0, 0]), [0, 0])
def test_str_leading_zeros(self):
p = np.poly1d([4, 3, 2, 1])
p[3] = 0
assert_equal(str(p),
" 2\n"
"3 x + 2 x + 1")
p = np.poly1d([1, 2])
p[0] = 0
p[1] = 0
assert_equal(str(p), " \n0")
def test_polyfit(self):
c = np.array([3., 2., 1.])
x = np.linspace(0, 2, 7)
y = np.polyval(c, x)
err = [1, -1, 1, -1, 1, -1, 1]
weights = np.arange(8, 1, -1)**2/7.0
# check 1D case
m, cov = np.polyfit(x, y+err, 2, cov=True)
est = [3.8571, 0.2857, 1.619]
assert_almost_equal(est, m, decimal=4)
val0 = [[2.9388, -5.8776, 1.6327],
[-5.8776, 12.7347, -4.2449],
[1.6327, -4.2449, 2.3220]]
assert_almost_equal(val0, cov, decimal=4)
m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
val = [[8.7929, -10.0103, 0.9756],
[-10.0103, 13.6134, -1.8178],
[0.9756, -1.8178, 0.6674]]
assert_almost_equal(val, cov2, decimal=4)
# check 2D (n,1) case
y = y[:, np.newaxis]
c = c[:, np.newaxis]
assert_almost_equal(c, np.polyfit(x, y, 2))
# check 2D (n,2) case
yy = np.concatenate((y, y), axis=1)
cc = np.concatenate((c, c), axis=1)
assert_almost_equal(cc, np.polyfit(x, yy, 2))
m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
assert_almost_equal(est, m[:, 0], decimal=4)
assert_almost_equal(est, m[:, 1], decimal=4)
assert_almost_equal(val0, cov[:, :, 0], decimal=4)
assert_almost_equal(val0, cov[:, :, 1], decimal=4)
def test_objects(self):
from decimal import Decimal
p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])
p2 = p * Decimal('1.333333333333333')
assert_(p2[1] == Decimal("3.9999999999999990"))
p2 = p.deriv()
assert_(p2[1] == Decimal('8.0'))
p2 = p.integ()
assert_(p2[3] == Decimal("1.333333333333333333333333333"))
assert_(p2[2] == Decimal('1.5'))
assert_(np.issubdtype(p2.coeffs.dtype, np.object_))
def test_complex(self):
p = np.poly1d([3j, 2j, 1j])
p2 = p.integ()
assert_((p2.coeffs == [1j, 1j, 1j, 0]).all())
p2 = p.deriv()
assert_((p2.coeffs == [6j, 2j]).all())
def test_integ_coeffs(self):
p = np.poly1d([3, 2, 1])
p2 = p.integ(3, k=[9, 7, 6])
assert_(
(p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all())
def test_zero_dims(self):
try:
np.poly(np.zeros((0, 0)))
except ValueError:
pass
if __name__ == "__main__":
run_module_suite()
|
franky88/emperioanimesta
|
refs/heads/master
|
env/Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py
|
514
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
self.num_connections, self.host, self.authurl)
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', reshdr)
log.debug('Response data: %s [...]', res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', dict(res.getheaders()))
log.debug('Response data: %s [...]', res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
|
treasure-data/digdag
|
refs/heads/master
|
digdag-standards/src/main/resources/digdag/standards/py/runner.py
|
1
|
import sys
import os
import json
import types
import inspect
import collections
import traceback
command = sys.argv[1]
in_file = sys.argv[2]
out_file = sys.argv[3]
with open(in_file) as f:
in_data = json.load(f)
params = in_data['params']
# fake a digdag_env module and register it as if it had already been imported
digdag_env_mod = sys.modules['digdag_env'] = types.ModuleType('digdag_env')
digdag_env_mod.params = params
digdag_env_mod.subtask_config = collections.OrderedDict()
digdag_env_mod.export_params = {}
digdag_env_mod.store_params = {}
digdag_env_mod.state_params = {}
import digdag_env
# fake a digdag module and register it as if it had already been imported
digdag_mod = sys.modules['digdag'] = types.ModuleType('digdag')
class Env(object):
def __init__(self, digdag_env_mod):
self.params = digdag_env_mod.params
self.subtask_config = digdag_env_mod.subtask_config
self.export_params = digdag_env_mod.export_params
self.store_params = digdag_env_mod.store_params
self.state_params = digdag_env_mod.state_params
self.subtask_index = 0
def set_state(self, params={}, **kwds):
self.state_params.update(params)
self.state_params.update(kwds)
def export(self, params={}, **kwds):
self.export_params.update(params)
self.export_params.update(kwds)
def store(self, params={}, **kwds):
self.store_params.update(params)
self.store_params.update(kwds)
def add_subtask(self, function=None, **params):
if function is not None and not isinstance(function, dict):
if hasattr(function, "im_class"):
# Python 2
command = ".".join([function.im_class.__module__, function.im_class.__name__, function.__name__])
else:
# Python 3
command = ".".join([function.__module__, function.__qualname__])
config = params
config["py>"] = command
else:
if isinstance(function, dict):
config = function.copy()
config.update(params)
else:
config = params
try:
json.dumps(config)
except Exception as error:
raise TypeError("Parameters must be serializable using JSON: %s" % str(error))
self.subtask_config["+subtask" + str(self.subtask_index)] = config
self.subtask_index += 1
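# Illustrative note (hypothetical names, not part of the original runner):
# assuming a module "tasks" defining a function "hello", a workflow step could
# register a follow-up task with
#
#     import digdag
#     digdag.env.add_subtask(tasks.hello, msg="hi")
#
# which stores {"py>": "tasks.hello", "msg": "hi"} under the key "+subtask0"
# in digdag.env.subtask_config, to be emitted later in the output JSON.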
digdag_mod.env = Env(digdag_env_mod)
import digdag
# add the archive path to import path
sys.path.append(os.path.abspath(os.getcwd()))
def digdag_inspect_command(command):
# package.name.Class.method
fragments = command.split(".")
method_name = fragments.pop()
class_type = None
callable_type = None
try:
mod = __import__(".".join(fragments), fromlist=[method_name])
try:
callable_type = getattr(mod, method_name)
except AttributeError as error:
raise AttributeError("Module '%s' has no attribute '%s'" % (".".join(fragments), method_name))
except ImportError as error:
class_name = fragments.pop()
mod = __import__(".".join(fragments), fromlist=[class_name])
try:
class_type = getattr(mod, class_name)
except AttributeError as error:
raise AttributeError("Module '%s' has no attribute '%s'" % (".".join(fragments), method_name))
if type(callable_type) == type:
class_type = callable_type
method_name = "run"
if class_type is not None:
return (class_type, method_name)
else:
return (callable_type, None)
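# Illustrative note (hypothetical names, not part of the original runner):
# digdag_inspect_command("mypkg.MyTask.run") imports mypkg, resolves MyTask and
# returns (MyTask, "run"); digdag_inspect_command("mypkg.my_func") returns
# (my_func, None); and a bare class such as "mypkg.MyTask" falls back to the
# default method name "run".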
def digdag_inspect_arguments(callable_type, exclude_self, params):
if callable_type == object.__init__:
        # object.__init__ accepts *varargs and **keywords in its signature, but it throws an exception if any are actually passed
return {}
if hasattr(inspect, 'getfullargspec'): # Python3
spec = inspect.getfullargspec(callable_type)
keywords_ = spec.varkw
else: # Python 2
spec = inspect.getargspec(callable_type)
keywords_ = spec.keywords
args = {}
for idx, key in enumerate(spec.args):
if exclude_self and idx == 0:
continue
if key in params:
args[key] = params[key]
else:
if spec.defaults is None or idx < len(spec.args) - len(spec.defaults):
                # this argument is required but missing from params, so raise an error
if hasattr(callable_type, '__qualname__'):
# Python 3
name = callable_type.__qualname__
elif hasattr(callable_type, 'im_class'):
# Python 2
name = "%s.%s" % (callable_type.im_class.__name__, callable_type.__name__)
else:
name = callable_type.__name__
raise TypeError("Method '%s' requires parameter '%s' but not set" % (name, key))
if keywords_:
# above code was only for validation
return params
else:
return args
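# Illustrative note (hypothetical signature, not part of the original runner):
# for def run(self, a, b=1) and params == {"a": 10, "c": 3},
# digdag_inspect_arguments(run, True, params) returns {"a": 10}, because "c"
# is not accepted and "b" has a default; if run also declared **kwargs, the
# full params dict would be returned instead.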
error = None
error_message = None
error_value = None
error_traceback = None
callable_type = None
method_name = None
try:
callable_type, method_name = digdag_inspect_command(command)
if method_name:
init_args = digdag_inspect_arguments(callable_type.__init__, True, params)
instance = callable_type(**init_args)
method = getattr(instance, method_name)
method_args = digdag_inspect_arguments(method, True, params)
result = method(**method_args)
else:
args = digdag_inspect_arguments(callable_type, False, params)
result = callable_type(**args)
except SystemExit as e:
    # SystemExit carries only an exit code, which is not helpful to users, so this block creates a more specific error message.
    # This error can happen when the called python module name and method name collide with those of a standard library module (e.g. tokenize.main).
    error = Exception("Failed to call python command with code:%d" % e.code, "Possible cause: Invalid python module call, duplicate module name with standard library")
error_type, error_value, _tb = sys.exc_info()
error_message = "%s %s" % (error.args[0], error.args[1])
error_traceback = traceback.format_exception(error_type, error_value, _tb)
except Exception as e:
error = e
error_type, error_value, _tb = sys.exc_info()
error_message = str(error_value)
error_traceback = traceback.format_exception(error_type, error_value, _tb)
out = {
'subtask_config': digdag_env.subtask_config,
'export_params': digdag_env.export_params,
'store_params': digdag_env.store_params,
#'state_params': digdag_env.state_params, # only for retrying
}
if error:
out['error'] = {
'class': error_value.__class__.__name__,
'message': error_message,
'backtrace': error_traceback
}
with open(out_file, 'w') as f:
json.dump(out, f)
if error:
raise error
|
oculusstorystudio/kraken
|
refs/heads/develop_OSS
|
Python/kraken/ui/HAppkit_Editors/core/__init__.py
|
2
|
#
# Copyright 2015 Horde Software Inc. All rights reserved.
#
|
NextINpact/LaPresseLibreSDK
|
refs/heads/master
|
python_django/sdk_lpl/models/VerificationModel.py
|
1
|
class VerificationModel(object):
def __init__(self, password, mail, code):
self.Password = password
self.Mail = mail
self.CodeUtilisateur = code
|
weiawe/django
|
refs/heads/master
|
django/core/mail/backends/smtp.py
|
477
|
"""SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address
from django.core.mail.utils import DNS_NAME
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False, use_ssl=None, timeout=None,
ssl_keyfile=None, ssl_certfile=None,
**kwargs):
super(EmailBackend, self).__init__(fail_silently=fail_silently)
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
self.username = settings.EMAIL_HOST_USER if username is None else username
self.password = settings.EMAIL_HOST_PASSWORD if password is None else password
self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls
self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl
self.timeout = settings.EMAIL_TIMEOUT if timeout is None else timeout
self.ssl_keyfile = settings.EMAIL_SSL_KEYFILE if ssl_keyfile is None else ssl_keyfile
self.ssl_certfile = settings.EMAIL_SSL_CERTFILE if ssl_certfile is None else ssl_certfile
if self.use_ssl and self.use_tls:
raise ValueError(
"EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
"one of those settings to True.")
self.connection = None
self._lock = threading.RLock()
def open(self):
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
connection_class = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
if self.use_ssl:
connection_params.update({
'keyfile': self.ssl_keyfile,
'certfile': self.ssl_certfile,
})
try:
self.connection = connection_class(self.host, self.port, **connection_params)
# TLS/SSL are mutually exclusive, so only attempt TLS over
# non-secure connections.
if not self.use_ssl and self.use_tls:
self.connection.ehlo()
self.connection.starttls(keyfile=self.ssl_keyfile, certfile=self.ssl_certfile)
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except smtplib.SMTPException:
if not self.fail_silently:
raise
def close(self):
"""Closes the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except smtplib.SMTPException:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
with self._lock:
new_conn_created = self.open()
if not self.connection:
# We failed silently on open().
# Trying to send would be pointless.
return
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
message = email_message.message()
try:
self.connection.sendmail(from_email, recipients, message.as_bytes(linesep='\r\n'))
except smtplib.SMTPException:
if not self.fail_silently:
raise
return False
return True
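# Minimal usage sketch (not part of the Django source; assumes the EMAIL_*
# settings such as EMAIL_HOST and EMAIL_PORT are configured, and the addresses
# below are placeholders):
#
#     from django.core.mail import send_mail
#     send_mail('subject', 'body', '[email protected]', ['[email protected]'])
#
# The backend opens a connection on demand in send_messages() and closes it
# again once the batch has been delivered.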
|
gregcaporaso/sketchbook
|
refs/heads/master
|
2014.04-sortmerna-tax/assign-taxonomy-sortmerna.py
|
1
|
#!/usr/bin/env python
# File created on 11 Apr 2014
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2014, The QIIME Project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from tempfile import mkdtemp
from collections import defaultdict, Counter
from os.path import join
from shutil import rmtree
from skbio.parse.sequences import parse_fasta
from qiime.util import (parse_command_line_parameters, make_option,
load_qiime_config, get_qiime_temp_dir,
qiime_system_call, create_dir, remove_files)
from qiime.assign_taxonomy import TaxonAssigner
qiime_config = load_qiime_config()
default_reference_seqs_fp = qiime_config['assign_taxonomy_reference_seqs_fp']
default_id_to_taxonomy_fp = qiime_config['assign_taxonomy_id_to_taxonomy_fp']
script_info = {}
script_info['brief_description'] = ""
script_info['script_description'] = ""
# Members of the tuple in script_usage are (title, description, example call)
script_info['script_usage'] = [("","","")]
script_info['output_description']= ""
script_info['required_options'] = [
make_option('-i', '--input_query_fp', type='existing_filepath',
help='the query sequences'),
make_option('-o', '--output_dir', type='new_dirpath',
help='directory to store output and log files')
]
script_info['optional_options'] = [
make_option('-t', '--id_to_taxonomy_fp', type="existing_filepath",
help='Path to tab-delimited file mapping sequences to their '
'taxonomy. Each assigned taxonomy is provided as a '
'semicolon-separated string. [default: %s]'
% default_id_to_taxonomy_fp,
default=default_id_to_taxonomy_fp),
make_option('-r', '--reference_seqs_fp', type="existing_filepath",
help='Path to reference sequences. These '
'are indexed to create the reference database. '
'[default: %s; REQUIRED if -b is not provided]'
% default_reference_seqs_fp,
default=default_reference_seqs_fp),
make_option('-b', '--reference_seqs_idx_fp',
                help='Path to the pre-indexed reference sequences (i.e., the '
                'reference database produced by indexdb_rna). '
                '[default: computed on-the-fly]', default=None),
make_option('--sortmerna_params', default=None,
help='string of parameters to pass to sortmerna'
                ' [default: no additional parameters are passed]'),
make_option('--indexdb_params', default=None,
help='string of parameters to pass to indexdb_rna'
                ' [default: no additional parameters are passed]'),
make_option('--min_consensus_fraction', default=0.51, type=float,
help='Minimum fraction of database hits that must have a '
'specific taxonomic assignment to assign that taxonomy '
'to a query [default: %default]'),
make_option('--min_percent_id', default=0.0, type=float,
help='Minimum percent identity to consider an alignment '
'to be a hit [default: all alignments are considered hits]'),
make_option('--best', default=None, type=int,
help='sortmerna\'s --best parameter [default: %default]'),
make_option('--num_alignments', default=None, type=int,
help='sortmerna\'s --num_alignments parameter [default: %default]'),
make_option('--min_lis', default=2, type=int,
help='sortmerna\'s min_lis parameter [default: %default]'),
]
script_info['version'] = __version__
def call_cmd(cmd, HALT_EXEC):
if HALT_EXEC:
print cmd
exit(0)
else:
stdout, stderr, exit_status = qiime_system_call(cmd)
if exit_status != 0:
            print "command '%s' failed!\nSTDOUT\n%s\nSTDERR\n%s\n" \
                % (cmd, stdout, stderr)
exit(1)
return cmd
def sortmerna_indexdb(input_fp, output_fp, params="", HALT_EXEC=False):
"""
"""
cmd = "indexdb_rna --ref %s,%s -v %s" % (input_fp, output_fp, params)
return call_cmd(cmd, HALT_EXEC)
def sortmerna_map(query_fp, refseqs_fasta_fp, refseqs_index_fp, blast_output_fp,
min_lis=2, best=None, num_alignments=None, params="",
HALT_EXEC=False):
"""
"""
if best is not None:
params = " ".join([params, "--best", str(best), "--min_lis",
str(min_lis)])
elif num_alignments is not None:
params = " ".join([params, "--num_alignments", str(num_alignments)])
cmd = "sortmerna --ref %s,%s --reads %s --blast 1 --aligned %s -v %s" \
% (refseqs_fasta_fp, refseqs_index_fp, query_fp, blast_output_fp,
params)
return call_cmd(cmd, HALT_EXEC)
def blast_to_tax_assignments(blast_output_f, id_to_taxonomy_map, min_percent_id=0.0):
"""
"""
result = defaultdict(list)
for line in blast_output_f:
fields = line.split('\t')
subject_id = fields[1]
percent_id = float(fields[2])
if percent_id > min_percent_id:
subject_tax = id_to_taxonomy_map[subject_id]
result[fields[0]].append([e.strip() for e in subject_tax.split(';')])
return result
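# Illustrative note (hypothetical data, not part of the original script): given
# a blast tabular line "q1\tref123\t97.5\t..." and
# id_to_taxonomy_map == {'ref123': 'k__Bacteria; p__Firmicutes'},
# blast_to_tax_assignments would produce
# result['q1'] == [['k__Bacteria', 'p__Firmicutes']], assuming 97.5 exceeds
# min_percent_id.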
def tax_assignments_to_consensus_assignments(query_to_assignments,
min_consensus_fraction=0.51):
"""
"""
for query_id, assignments in query_to_assignments.iteritems():
# this call will get cleaned up
consensus_assignment = \
get_consensus_assignment(assignments,
min_consensus_fraction=min_consensus_fraction)
query_to_assignments[query_id] = consensus_assignment
return query_to_assignments
def get_consensus_assignment(assignments, unassignable_label='unassigned',
min_consensus_fraction=0.51):
""" compute the consensus assignment from a list of assignments
This code was pulled almost exactly from QIIME's
UclustConsensusTaxonAssigner._get_consensus_assignment method.
"""
num_input_assignments = len(assignments)
consensus_assignment = []
# if the assignments don't all have the same number
# of levels, the resulting assignment will have a max number
# of levels equal to the number of levels in the assignment
# with the fewest number of levels. this is to avoid
# a case where, for example, there are n assignments, one of
# which has 7 levels, and the other n-1 assignments have 6 levels.
# A 7th level in the result would be misleading because it
# would appear to the user as though it was the consensus
# across all n assignments.
num_levels = min([len(a) for a in assignments])
# iterate over the assignment levels
for level in range(num_levels):
# count the different taxonomic assignments at the current level.
# the counts are computed based on the current level and all higher
# levels to reflect that, for example, 'p__A; c__B; o__C' and
# 'p__X; c__Y; o__C' represent different taxa at the o__ level (since
# they are different at the p__ and c__ levels).
current_level_assignments = \
Counter([tuple(e[:level + 1]) for e in assignments])
# identify the most common taxonomic assignment, and compute the
# fraction of assignments that contained it. it's safe to compute the
# fraction using num_assignments because the deepest level we'll
# ever look at here is num_levels (see above comment on how that
# is decided).
tax, max_count = current_level_assignments.most_common(1)[0]
max_consensus_fraction = max_count / num_input_assignments
# check whether the most common taxonomic assignment is observed
# in at least min_consensus_fraction of the sequences
if max_consensus_fraction >= min_consensus_fraction:
# if so, append the current level only (e.g., 'o__C' if tax is
# 'p__A; c__B; o__C', and continue on to the next level
consensus_assignment.append((tax[-1], max_consensus_fraction))
else:
# if not, there is no assignment at this level, and we're
# done iterating over levels
break
# construct the results
# determine the number of levels in the consensus assignment
consensus_assignment_depth = len(consensus_assignment)
if consensus_assignment_depth > 0:
# if it's greater than 0, generate a list of the
# taxa assignments at each level
assignment_result = [a[0] for a in consensus_assignment]
# and assign the consensus_fraction_result as the
# consensus fraction at the deepest level
consensus_fraction_result = \
consensus_assignment[consensus_assignment_depth - 1][1]
else:
# if there are zero assignments, indicate that the taxa is
# unknown
assignment_result = [unassignable_label]
# and assign the consensus_fraction_result to 1.0 (this is
# somewhat arbitrary, but could be interpreted as all of the
# assignments suggest an unknown taxonomy)
consensus_fraction_result = 1.0
return (
assignment_result, consensus_fraction_result, num_input_assignments
)
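# Worked example (hypothetical assignments, not from the original QIIME code):
# with min_consensus_fraction=0.51 and
#   assignments = [['p__A', 'c__B', 'o__C'],
#                  ['p__A', 'c__B', 'o__D'],
#                  ['p__A', 'c__X']]
# only the two shallowest levels are compared (the shortest assignment has two
# levels); 'p__A' appears in 3/3 assignments and ('p__A', 'c__B') in 2/3, so
# the function returns (['p__A', 'c__B'], 0.666..., 3).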
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
input_query_fp = opts.input_query_fp
id_to_taxonomy_fp = opts.id_to_taxonomy_fp
reference_seqs_fp = opts.reference_seqs_fp
reference_seqs_idx_fp = opts.reference_seqs_idx_fp
output_dir = opts.output_dir
indexdb_params = opts.indexdb_params or ""
sortmerna_params = opts.sortmerna_params or ""
min_lis = opts.min_lis
best = opts.best
num_alignments = opts.num_alignments
min_consensus_fraction = opts.min_consensus_fraction
min_percent_id = opts.min_percent_id
if (best is None) and (num_alignments is None):
option_parser.error("Either --best or --num_alignments must be passed.")
elif (best is not None) and (num_alignments is not None):
option_parser.error("--best and --num_alignments cannot both be passed.")
else:
pass
create_dir(output_dir)
dirs_to_remove = []
qiime_temp_dir = get_qiime_temp_dir()
output_fp = join(output_dir, 'assignments.tsv')
command_log_fp = join(output_dir, 'cmds.log')
output_f = open(output_fp, 'w')
command_log_f = open(command_log_fp, 'w')
id_to_taxonomy_f = open(id_to_taxonomy_fp, 'U')
id_to_taxonomy_map = \
TaxonAssigner._parse_id_to_taxonomy_file(id_to_taxonomy_f)
id_to_taxonomy_f.close()
if reference_seqs_idx_fp is None:
index_dir = mkdtemp(dir=qiime_temp_dir)
create_dir(index_dir)
dirs_to_remove.append(index_dir)
reference_seqs_idx_fp = join(index_dir, 'ref.idx')
cmd = sortmerna_indexdb(reference_seqs_fp, reference_seqs_idx_fp,
params=indexdb_params)
command_log_f.write(cmd)
command_log_f.write('\n')
blast_output_basename = join(output_dir, 'log')
blast_output_fp = '.'.join([blast_output_basename, 'blast'])
cmd = sortmerna_map(input_query_fp, reference_seqs_fp,
reference_seqs_idx_fp, blast_output_basename,
min_lis=min_lis, best=best, num_alignments=num_alignments,
params=sortmerna_params)
command_log_f.write(cmd)
command_log_f.write('\n')
query_to_assignments = blast_to_tax_assignments(
open(blast_output_fp), id_to_taxonomy_map, min_percent_id=min_percent_id)
results = tax_assignments_to_consensus_assignments(
query_to_assignments, min_consensus_fraction=min_consensus_fraction)
output_f.write('#OTU ID\ttaxonomy\tconfidence\tnum hits\n')
# this is ugly... we need results for all input sequences, but if there
# are no hits from sortmerna for a sequence we won't have it in results.
# we'll want to find a nicer solution than having to iterate over the input
# file. maybe sortmerna can create a failures file...
for query_id, _ in parse_fasta(open(input_query_fp)):
query_id = query_id.split()[0]
assignment = results[query_id]
if len(assignment) == 0:
assignment = [['Unassigned'], 0.0, 0]
output_f.write('\t'.join([query_id, '; '.join(assignment[0]),
str(assignment[1]), str(assignment[2])]))
output_f.write('\n')
# clean up time...
output_f.close()
command_log_f.close()
map(rmtree, dirs_to_remove)
if __name__ == "__main__":
main()
|
bitland/p2pool
|
refs/heads/master
|
wstools/Namespaces.py
|
292
|
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
"""Namespace module, so you don't need PyXML
"""
ident = "$Id$"
try:
from xml.ns import SOAP, SCHEMA, WSDL, XMLNS, DSIG, ENCRYPTION
DSIG.C14N = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"
except:
class SOAP:
ENV = "http://schemas.xmlsoap.org/soap/envelope/"
ENC = "http://schemas.xmlsoap.org/soap/encoding/"
ACTOR_NEXT = "http://schemas.xmlsoap.org/soap/actor/next"
class SCHEMA:
XSD1 = "http://www.w3.org/1999/XMLSchema"
XSD2 = "http://www.w3.org/2000/10/XMLSchema"
XSD3 = "http://www.w3.org/2001/XMLSchema"
XSD_LIST = [ XSD1, XSD2, XSD3]
XSI1 = "http://www.w3.org/1999/XMLSchema-instance"
XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
XSI_LIST = [ XSI1, XSI2, XSI3 ]
BASE = XSD3
class WSDL:
BASE = "http://schemas.xmlsoap.org/wsdl/"
BIND_HTTP = "http://schemas.xmlsoap.org/wsdl/http/"
BIND_MIME = "http://schemas.xmlsoap.org/wsdl/mime/"
BIND_SOAP = "http://schemas.xmlsoap.org/wsdl/soap/"
BIND_SOAP12 = "http://schemas.xmlsoap.org/wsdl/soap12/"
class XMLNS:
BASE = "http://www.w3.org/2000/xmlns/"
XML = "http://www.w3.org/XML/1998/namespace"
HTML = "http://www.w3.org/TR/REC-html40"
class DSIG:
BASE = "http://www.w3.org/2000/09/xmldsig#"
C14N = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"
C14N_COMM = "http://www.w3.org/TR/2000/CR-xml-c14n-20010315#WithComments"
C14N_EXCL = "http://www.w3.org/2001/10/xml-exc-c14n#"
DIGEST_MD2 = "http://www.w3.org/2000/09/xmldsig#md2"
DIGEST_MD5 = "http://www.w3.org/2000/09/xmldsig#md5"
DIGEST_SHA1 = "http://www.w3.org/2000/09/xmldsig#sha1"
ENC_BASE64 = "http://www.w3.org/2000/09/xmldsig#base64"
ENVELOPED = "http://www.w3.org/2000/09/xmldsig#enveloped-signature"
HMAC_SHA1 = "http://www.w3.org/2000/09/xmldsig#hmac-sha1"
SIG_DSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#dsa-sha1"
SIG_RSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#rsa-sha1"
XPATH = "http://www.w3.org/TR/1999/REC-xpath-19991116"
XSLT = "http://www.w3.org/TR/1999/REC-xslt-19991116"
class ENCRYPTION:
BASE = "http://www.w3.org/2001/04/xmlenc#"
BLOCK_3DES = "http://www.w3.org/2001/04/xmlenc#des-cbc"
BLOCK_AES128 = "http://www.w3.org/2001/04/xmlenc#aes128-cbc"
BLOCK_AES192 = "http://www.w3.org/2001/04/xmlenc#aes192-cbc"
BLOCK_AES256 = "http://www.w3.org/2001/04/xmlenc#aes256-cbc"
DIGEST_RIPEMD160 = "http://www.w3.org/2001/04/xmlenc#ripemd160"
DIGEST_SHA256 = "http://www.w3.org/2001/04/xmlenc#sha256"
DIGEST_SHA512 = "http://www.w3.org/2001/04/xmlenc#sha512"
KA_DH = "http://www.w3.org/2001/04/xmlenc#dh"
KT_RSA_1_5 = "http://www.w3.org/2001/04/xmlenc#rsa-1_5"
KT_RSA_OAEP = "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"
STREAM_ARCFOUR = "http://www.w3.org/2001/04/xmlenc#arcfour"
WRAP_3DES = "http://www.w3.org/2001/04/xmlenc#kw-3des"
WRAP_AES128 = "http://www.w3.org/2001/04/xmlenc#kw-aes128"
WRAP_AES192 = "http://www.w3.org/2001/04/xmlenc#kw-aes192"
WRAP_AES256 = "http://www.w3.org/2001/04/xmlenc#kw-aes256"
class WSRF_V1_2:
'''OASIS WSRF Specifications Version 1.2
'''
class LIFETIME:
XSD_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceLifetime-1.2-draft-01.xsd"
XSD_DRAFT4 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceLifetime-1.2-draft-04.xsd"
WSDL_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceLifetime-1.2-draft-01.wsdl"
WSDL_DRAFT4 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceLifetime-1.2-draft-04.wsdl"
LATEST = WSDL_DRAFT4
WSDL_LIST = (WSDL_DRAFT1, WSDL_DRAFT4)
XSD_LIST = (XSD_DRAFT1, XSD_DRAFT4)
class PROPERTIES:
XSD_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceProperties-1.2-draft-01.xsd"
XSD_DRAFT5 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceProperties-1.2-draft-05.xsd"
WSDL_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceProperties-1.2-draft-01.wsdl"
WSDL_DRAFT5 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceProperties-1.2-draft-05.wsdl"
LATEST = WSDL_DRAFT5
WSDL_LIST = (WSDL_DRAFT1, WSDL_DRAFT5)
XSD_LIST = (XSD_DRAFT1, XSD_DRAFT5)
class BASENOTIFICATION:
XSD_DRAFT1 = "http://docs.oasis-open.org/wsn/2004/06/wsn-WS-BaseNotification-1.2-draft-01.xsd"
WSDL_DRAFT1 = "http://docs.oasis-open.org/wsn/2004/06/wsn-WS-BaseNotification-1.2-draft-01.wsdl"
LATEST = WSDL_DRAFT1
WSDL_LIST = (WSDL_DRAFT1,)
XSD_LIST = (XSD_DRAFT1,)
class BASEFAULTS:
XSD_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-BaseFaults-1.2-draft-01.xsd"
XSD_DRAFT3 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-BaseFaults-1.2-draft-03.xsd"
#LATEST = DRAFT3
#WSDL_LIST = (WSDL_DRAFT1, WSDL_DRAFT3)
XSD_LIST = (XSD_DRAFT1, XSD_DRAFT3)
WSRF = WSRF_V1_2
WSRFLIST = (WSRF_V1_2,)
class OASIS:
'''URLs for Oasis specifications
'''
WSSE = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
UTILITY = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"
class X509TOKEN:
Base64Binary = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary"
STRTransform = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0"
PKCS7 = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#PKCS7"
X509 = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509"
X509PKIPathv1 = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509PKIPathv1"
X509v3SubjectKeyIdentifier = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3SubjectKeyIdentifier"
LIFETIME = WSRF_V1_2.LIFETIME.XSD_DRAFT1
PROPERTIES = WSRF_V1_2.PROPERTIES.XSD_DRAFT1
BASENOTIFICATION = WSRF_V1_2.BASENOTIFICATION.XSD_DRAFT1
BASEFAULTS = WSRF_V1_2.BASEFAULTS.XSD_DRAFT1
class APACHE:
    '''This namespace is defined by AXIS and is used for the TC in TCapache.py,
Map and file attachment (DataHandler)
'''
AXIS_NS = "http://xml.apache.org/xml-soap"
class WSTRUST:
BASE = "http://schemas.xmlsoap.org/ws/2004/04/trust"
ISSUE = "http://schemas.xmlsoap.org/ws/2004/04/trust/Issue"
class WSSE:
BASE = "http://schemas.xmlsoap.org/ws/2002/04/secext"
TRUST = WSTRUST.BASE
class WSU:
BASE = "http://schemas.xmlsoap.org/ws/2002/04/utility"
UTILITY = "http://schemas.xmlsoap.org/ws/2002/07/utility"
class WSR:
PROPERTIES = "http://www.ibm.com/xmlns/stdwip/web-services/WS-ResourceProperties"
LIFETIME = "http://www.ibm.com/xmlns/stdwip/web-services/WS-ResourceLifetime"
class WSA200508:
ADDRESS = "http://www.w3.org/2005/08/addressing"
ANONYMOUS = "%s/anonymous" %ADDRESS
FAULT = "%s/fault" %ADDRESS
class WSA200408:
ADDRESS = "http://schemas.xmlsoap.org/ws/2004/08/addressing"
ANONYMOUS = "%s/role/anonymous" %ADDRESS
FAULT = "%s/fault" %ADDRESS
class WSA200403:
ADDRESS = "http://schemas.xmlsoap.org/ws/2004/03/addressing"
ANONYMOUS = "%s/role/anonymous" %ADDRESS
FAULT = "%s/fault" %ADDRESS
class WSA200303:
ADDRESS = "http://schemas.xmlsoap.org/ws/2003/03/addressing"
ANONYMOUS = "%s/role/anonymous" %ADDRESS
FAULT = None
WSA = WSA200408
WSA_LIST = (WSA200508, WSA200408, WSA200403, WSA200303)
class _WSAW(str):
""" Define ADDRESS attribute to be compatible with WSA* layout """
ADDRESS = property(lambda s: s)
WSAW200605 = _WSAW("http://www.w3.org/2006/05/addressing/wsdl")
WSAW_LIST = (WSAW200605,)
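# Illustrative note (not part of the original module): _WSAW subclasses str and
# defines ADDRESS as a property returning the instance itself, so
# WSAW200605.ADDRESS == "http://www.w3.org/2006/05/addressing/wsdl"; callers
# can therefore treat WSAW200605 like the WSA* classes above, which expose an
# ADDRESS attribute.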
class WSP:
POLICY = "http://schemas.xmlsoap.org/ws/2002/12/policy"
class BEA:
SECCONV = "http://schemas.xmlsoap.org/ws/2004/04/sc"
SCTOKEN = "http://schemas.xmlsoap.org/ws/2004/04/security/sc/sct"
class GLOBUS:
SECCONV = "http://wsrf.globus.org/core/2004/07/security/secconv"
CORE = "http://www.globus.org/namespaces/2004/06/core"
SIG = "http://www.globus.org/2002/04/xmlenc#gssapi-sign"
TOKEN = "http://www.globus.org/ws/2004/09/security/sc#GSSAPI_GSI_TOKEN"
ZSI_SCHEMA_URI = 'http://www.zolera.com/schemas/ZSI/'
|
Datera/cinder
|
refs/heads/datera_queens_backport
|
cinder/tests/unit/test_cmd.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import sys
import time
import ddt
import fixtures
import mock
from oslo_config import cfg
from oslo_db import exception as oslo_exception
from oslo_utils import timeutils
import six
from six.moves import StringIO
try:
import rtslib_fb
except ImportError:
import rtslib as rtslib_fb
from cinder.cmd import api as cinder_api
from cinder.cmd import backup as cinder_backup
from cinder.cmd import manage as cinder_manage
from cinder.cmd import rtstool as cinder_rtstool
from cinder.cmd import scheduler as cinder_scheduler
from cinder.cmd import volume as cinder_volume
from cinder.cmd import volume_usage_audit
from cinder.common import constants
from cinder import context
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_cluster
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_service
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils
from cinder import version
from cinder.volume import rpcapi
CONF = cfg.CONF
class TestCinderApiCmd(test.TestCase):
"""Unit test cases for python modules under cinder/cmd."""
def setUp(self):
super(TestCinderApiCmd, self).setUp()
sys.argv = ['cinder-api']
@mock.patch('cinder.service.WSGIService')
@mock.patch('cinder.service.process_launcher')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, monkey_patch, rpc_init, process_launcher,
wsgi_service):
launcher = process_launcher.return_value
server = wsgi_service.return_value
server.workers = mock.sentinel.worker_count
cinder_api.main()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
rpc_init.assert_called_once_with(CONF)
process_launcher.assert_called_once_with()
wsgi_service.assert_called_once_with('osapi_volume')
launcher.launch_service.assert_called_once_with(
server,
workers=server.workers)
launcher.wait.assert_called_once_with()
class TestCinderBackupCmd(test.TestCase):
def setUp(self):
super(TestCinderBackupCmd, self).setUp()
sys.argv = ['cinder-backup']
@mock.patch('cinder.service.wait')
@mock.patch('cinder.service.serve')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, monkey_patch, service_create, service_serve,
service_wait):
server = service_create.return_value
cinder_backup.main()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
service_create.assert_called_once_with(binary='cinder-backup',
coordination=True)
service_serve.assert_called_once_with(server)
service_wait.assert_called_once_with()
class TestCinderSchedulerCmd(test.TestCase):
def setUp(self):
super(TestCinderSchedulerCmd, self).setUp()
sys.argv = ['cinder-scheduler']
@mock.patch('cinder.service.wait')
@mock.patch('cinder.service.serve')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, monkey_patch, service_create,
service_serve, service_wait):
server = service_create.return_value
cinder_scheduler.main()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
service_create.assert_called_once_with(binary='cinder-scheduler')
service_serve.assert_called_once_with(server)
service_wait.assert_called_once_with()
class TestCinderVolumeCmdPosix(test.TestCase):
def setUp(self):
super(TestCinderVolumeCmdPosix, self).setUp()
sys.argv = ['cinder-volume']
self.patch('os.name', 'posix')
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, monkey_patch, service_create,
get_launcher):
CONF.set_override('enabled_backends', None)
self.assertRaises(SystemExit, cinder_volume.main)
self.assertFalse(service_create.called)
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main_with_backends(self, log_setup, monkey_patch, service_create,
get_launcher):
backends = ['', 'backend1', 'backend2', '']
CONF.set_override('enabled_backends', backends)
CONF.set_override('host', 'host')
launcher = get_launcher.return_value
cinder_volume.main()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
get_launcher.assert_called_once_with()
c1 = mock.call(binary=constants.VOLUME_BINARY, host='host@backend1',
service_name='backend1', coordination=True,
cluster=None)
c2 = mock.call(binary=constants.VOLUME_BINARY, host='host@backend2',
service_name='backend2', coordination=True,
cluster=None)
service_create.assert_has_calls([c1, c2])
self.assertEqual(2, launcher.launch_service.call_count)
launcher.wait.assert_called_once_with()
@ddt.ddt
class TestCinderVolumeCmdWin32(test.TestCase):
def setUp(self):
super(TestCinderVolumeCmdWin32, self).setUp()
sys.argv = ['cinder-volume']
self._mock_win32_proc_launcher = mock.Mock()
self.patch('os.name', 'nt')
self.patch('cinder.service.WindowsProcessLauncher',
lambda *args, **kwargs: self._mock_win32_proc_launcher)
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, monkey_patch, service_create,
get_launcher):
CONF.set_override('enabled_backends', None)
self.assertRaises(SystemExit, cinder_volume.main)
self.assertFalse(service_create.called)
self.assertFalse(self._mock_win32_proc_launcher.called)
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main_invalid_backend(self, log_setup, monkey_patch,
service_create, get_launcher):
CONF.set_override('enabled_backends', 'backend1')
CONF.set_override('backend_name', 'backend2')
self.assertRaises(exception.InvalidInput, cinder_volume.main)
self.assertFalse(service_create.called)
self.assertFalse(self._mock_win32_proc_launcher.called)
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
@ddt.data({},
{'binary_path': 'cinder-volume-script.py',
'exp_py_executable': True})
@ddt.unpack
def test_main_with_multiple_backends(self, log_setup, monkey_patch,
binary_path='cinder-volume',
exp_py_executable=False):
# If multiple backends are used, we expect the Windows process
# launcher to be used in order to create the child processes.
backends = ['', 'backend1', 'backend2', '']
CONF.set_override('enabled_backends', backends)
CONF.set_override('host', 'host')
launcher = self._mock_win32_proc_launcher
# Depending on the setuptools version, '-script.py' and '.exe'
# binary path extensions may be trimmed. We need to take this
# into consideration when building the command that will be
# used to spawn child subprocesses.
sys.argv = [binary_path]
cinder_volume.main()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
exp_cmd_prefix = [sys.executable] if exp_py_executable else []
exp_cmds = [
exp_cmd_prefix + sys.argv + ['--backend_name=%s' % backend_name]
for backend_name in ['backend1', 'backend2']]
launcher.add_process.assert_has_calls(
[mock.call(exp_cmd) for exp_cmd in exp_cmds])
launcher.wait.assert_called_once_with()
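    # Illustrative note (not part of the original test): with
    # binary_path='cinder-volume-script.py' and exp_py_executable=True, the
    # expected child commands become
    # [sys.executable, 'cinder-volume-script.py', '--backend_name=backend1']
    # (and likewise for backend2), mirroring how the Windows process launcher
    # re-invokes the script once per enabled backend.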
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main_with_multiple_backends_child(
self, log_setup, monkey_patch, service_create, get_launcher):
# We're testing the code expected to be run within child processes.
backends = ['', 'backend1', 'backend2', '']
CONF.set_override('enabled_backends', backends)
CONF.set_override('host', 'host')
launcher = get_launcher.return_value
sys.argv += ['--backend_name', 'backend2']
cinder_volume.main()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
service_create.assert_called_once_with(
binary=constants.VOLUME_BINARY, host='host@backend2',
service_name='backend2', coordination=True,
cluster=None)
launcher.launch_service.assert_called_once_with(
service_create.return_value)
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main_with_single_backend(
self, log_setup, monkey_patch, service_create, get_launcher):
# We're expecting the service to be run within the same process.
CONF.set_override('enabled_backends', ['backend2'])
CONF.set_override('host', 'host')
launcher = get_launcher.return_value
cinder_volume.main()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
service_create.assert_called_once_with(
binary=constants.VOLUME_BINARY, host='host@backend2',
service_name='backend2', coordination=True,
cluster=None)
launcher.launch_service.assert_called_once_with(
service_create.return_value)
@ddt.ddt
class TestCinderManageCmd(test.TestCase):
def setUp(self):
super(TestCinderManageCmd, self).setUp()
sys.argv = ['cinder-manage']
def _test_purge_invalid_age_in_days(self, age_in_days):
db_cmds = cinder_manage.DbCommands()
ex = self.assertRaises(SystemExit, db_cmds.purge, age_in_days)
self.assertEqual(1, ex.code)
@mock.patch('cinder.objects.ServiceList.get_all')
@mock.patch('cinder.db.migration.db_sync')
def test_db_commands_sync(self, db_sync, service_get_mock):
version = 11
db_cmds = cinder_manage.DbCommands()
db_cmds.sync(version=version)
db_sync.assert_called_once_with(version)
service_get_mock.assert_not_called()
@mock.patch('cinder.objects.Service.save')
@mock.patch('cinder.objects.ServiceList.get_all')
@mock.patch('cinder.db.migration.db_sync')
def test_db_commands_sync_bump_versions(self, db_sync, service_get_mock,
service_save):
ctxt = context.get_admin_context()
services = [fake_service.fake_service_obj(ctxt,
binary='cinder-' + binary,
rpc_current_version='0.1',
object_current_version='0.2')
for binary in ('volume', 'scheduler', 'backup')]
service_get_mock.return_value = services
version = 11
db_cmds = cinder_manage.DbCommands()
db_cmds.sync(version=version, bump_versions=True)
db_sync.assert_called_once_with(version)
self.assertEqual(3, service_save.call_count)
for service in services:
self.assertEqual(cinder_manage.RPC_VERSIONS[service.binary],
service.rpc_current_version)
self.assertEqual(cinder_manage.OVO_VERSION,
service.object_current_version)
@mock.patch('oslo_db.sqlalchemy.migration.db_version')
def test_db_commands_version(self, db_version):
db_cmds = cinder_manage.DbCommands()
with mock.patch('sys.stdout', new=six.StringIO()):
db_cmds.version()
self.assertEqual(1, db_version.call_count)
def test_db_commands_upgrade_out_of_range(self):
version = 2147483647
db_cmds = cinder_manage.DbCommands()
exit = self.assertRaises(SystemExit, db_cmds.sync, version + 1)
self.assertEqual(1, exit.code)
@mock.patch("oslo_db.sqlalchemy.migration.db_sync")
def test_db_commands_script_not_present(self, db_sync):
db_sync.side_effect = oslo_exception.DBMigrationError(None)
db_cmds = cinder_manage.DbCommands()
exit = self.assertRaises(SystemExit, db_cmds.sync, 101)
self.assertEqual(1, exit.code)
@mock.patch('cinder.cmd.manage.DbCommands.online_migrations',
(mock.Mock(side_effect=((2, 2), (0, 0)), __name__='foo'),))
def test_db_commands_online_data_migrations(self):
db_cmds = cinder_manage.DbCommands()
exit = self.assertRaises(SystemExit, db_cmds.online_data_migrations)
self.assertEqual(0, exit.code)
cinder_manage.DbCommands.online_migrations[0].assert_has_calls(
(mock.call(mock.ANY, 50),) * 2)
def _fake_db_command(self, migrations=None):
if migrations is None:
mock_mig_1 = mock.MagicMock(__name__="mock_mig_1")
mock_mig_2 = mock.MagicMock(__name__="mock_mig_2")
mock_mig_1.return_value = (5, 4)
mock_mig_2.return_value = (6, 6)
migrations = (mock_mig_1, mock_mig_2)
class _CommandSub(cinder_manage.DbCommands):
online_migrations = migrations
return _CommandSub
@mock.patch('cinder.context.get_admin_context')
def test_online_migrations(self, mock_get_context):
self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO()))
ctxt = mock_get_context.return_value
db_cmds = self._fake_db_command()
command = db_cmds()
exit = self.assertRaises(SystemExit,
command.online_data_migrations, 10)
self.assertEqual(1, exit.code)
expected = """\
5 rows matched query mock_mig_1, 4 migrated, 1 remaining
6 rows matched query mock_mig_2, 6 migrated, 0 remaining
+------------+--------------+-----------+
| Migration | Total Needed | Completed |
+------------+--------------+-----------+
| mock_mig_1 | 5 | 4 |
| mock_mig_2 | 6 | 6 |
+------------+--------------+-----------+
"""
command.online_migrations[0].assert_has_calls([mock.call(ctxt,
10)])
command.online_migrations[1].assert_has_calls([mock.call(ctxt,
6)])
self.assertEqual(expected, sys.stdout.getvalue())
@mock.patch('cinder.cmd.manage.DbCommands.online_migrations',
(mock.Mock(side_effect=((2, 2), (0, 0)), __name__='foo'),))
def test_db_commands_online_data_migrations_ignore_state_and_max(self):
db_cmds = cinder_manage.DbCommands()
exit = self.assertRaises(SystemExit, db_cmds.online_data_migrations,
2)
self.assertEqual(1, exit.code)
cinder_manage.DbCommands.online_migrations[0].assert_called_once_with(
mock.ANY, 2)
@mock.patch('cinder.cmd.manage.DbCommands.online_migrations',
(mock.Mock(side_effect=((2, 2), (0, 0)), __name__='foo'),))
def test_db_commands_online_data_migrations_max_negative(self):
db_cmds = cinder_manage.DbCommands()
exit = self.assertRaises(SystemExit, db_cmds.online_data_migrations,
-1)
self.assertEqual(127, exit.code)
cinder_manage.DbCommands.online_migrations[0].assert_not_called()
@mock.patch('cinder.version.version_string')
def test_versions_commands_list(self, version_string):
version_cmds = cinder_manage.VersionCommands()
with mock.patch('sys.stdout', new=six.StringIO()):
version_cmds.list()
version_string.assert_called_once_with()
@mock.patch('cinder.version.version_string')
def test_versions_commands_call(self, version_string):
version_cmds = cinder_manage.VersionCommands()
with mock.patch('sys.stdout', new=six.StringIO()):
version_cmds.__call__()
version_string.assert_called_once_with()
def test_purge_with_negative_age_in_days(self):
age_in_days = -1
self._test_purge_invalid_age_in_days(age_in_days)
def test_purge_exceeded_age_in_days_limit(self):
age_in_days = int(time.time() / 86400) + 1
self._test_purge_invalid_age_in_days(age_in_days)
@mock.patch('cinder.db.sqlalchemy.api.purge_deleted_rows')
@mock.patch('cinder.context.get_admin_context')
def test_purge_less_than_age_in_days_limit(self, get_admin_context,
purge_deleted_rows):
age_in_days = int(time.time() / 86400) - 1
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
is_admin=True)
get_admin_context.return_value = ctxt
purge_deleted_rows.return_value = None
db_cmds = cinder_manage.DbCommands()
db_cmds.purge(age_in_days)
get_admin_context.assert_called_once_with()
purge_deleted_rows.assert_called_once_with(
ctxt, age_in_days=age_in_days)
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_host_commands_list(self, get_admin_context, service_get_all):
get_admin_context.return_value = mock.sentinel.ctxt
service_get_all.return_value = [
{'host': 'fake-host',
'availability_zone': 'fake-az',
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
expected_out = ("%(host)-25s\t%(zone)-15s\n" %
{'host': 'host', 'zone': 'zone'})
expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" %
{'host': 'fake-host',
'availability_zone': 'fake-az'})
host_cmds = cinder_manage.HostCommands()
host_cmds.list()
get_admin_context.assert_called_once_with()
service_get_all.assert_called_once_with(mock.sentinel.ctxt)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_host_commands_list_with_zone(self, get_admin_context,
service_get_all):
get_admin_context.return_value = mock.sentinel.ctxt
service_get_all.return_value = [
{'host': 'fake-host',
'availability_zone': 'fake-az1',
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'},
{'host': 'fake-host',
'availability_zone': 'fake-az2',
'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}]
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
expected_out = ("%(host)-25s\t%(zone)-15s\n" %
{'host': 'host', 'zone': 'zone'})
expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" %
{'host': 'fake-host',
'availability_zone': 'fake-az1'})
host_cmds = cinder_manage.HostCommands()
host_cmds.list(zone='fake-az1')
get_admin_context.assert_called_once_with()
service_get_all.assert_called_once_with(mock.sentinel.ctxt)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.rpc.get_client')
@mock.patch('cinder.rpc.init')
def test_volume_commands_delete(self, rpc_init, get_client,
get_admin_context, volume_get):
ctxt = context.RequestContext('admin', 'fake', True)
get_admin_context.return_value = ctxt
mock_client = mock.MagicMock()
cctxt = mock.MagicMock()
mock_client.prepare.return_value = cctxt
get_client.return_value = mock_client
host = 'fake@host'
db_volume = {'host': host + '#pool1'}
volume = fake_volume.fake_db_volume(**db_volume)
volume_obj = fake_volume.fake_volume_obj(ctxt, **volume)
volume_id = volume['id']
volume_get.return_value = volume
volume_cmds = cinder_manage.VolumeCommands()
volume_cmds._client = mock_client
volume_cmds.delete(volume_id)
volume_get.assert_called_once_with(ctxt, volume_id)
mock_client.prepare.assert_called_once_with(
server="fake",
topic="cinder-volume.fake@host",
version="3.0")
cctxt.cast.assert_called_once_with(
ctxt, 'delete_volume',
cascade=False,
unmanage_only=False,
volume=volume_obj)
@mock.patch('cinder.db.volume_destroy')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.rpc.init')
def test_volume_commands_delete_no_host(self, rpc_init, get_admin_context,
volume_get, volume_destroy):
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
is_admin=True)
get_admin_context.return_value = ctxt
volume = fake_volume.fake_db_volume()
volume_id = volume['id']
volume_get.return_value = volume
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
expected_out = ('Volume not yet assigned to host.\n'
'Deleting volume from database and skipping'
' rpc.\n')
volume_cmds = cinder_manage.VolumeCommands()
volume_cmds.delete(volume_id)
get_admin_context.assert_called_once_with()
volume_get.assert_called_once_with(ctxt, volume_id)
self.assertTrue(volume_destroy.called)
admin_context = volume_destroy.call_args[0][0]
self.assertTrue(admin_context.is_admin)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.db.volume_destroy')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.rpc.init')
def test_volume_commands_delete_volume_in_use(self, rpc_init,
get_admin_context,
volume_get, volume_destroy):
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_context.return_value = ctxt
db_volume = {'status': 'in-use', 'host': 'fake-host'}
volume = fake_volume.fake_db_volume(**db_volume)
volume_id = volume['id']
volume_get.return_value = volume
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
expected_out = ('Volume is in-use.\n'
'Detach volume from instance and then try'
' again.\n')
volume_cmds = cinder_manage.VolumeCommands()
volume_cmds.delete(volume_id)
volume_get.assert_called_once_with(ctxt, volume_id)
self.assertEqual(expected_out, fake_out.getvalue())
def test_config_commands_list(self):
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
expected_out = ''
for key, value in CONF.items():
expected_out += '%s = %s' % (key, value) + '\n'
config_cmds = cinder_manage.ConfigCommands()
config_cmds.list()
self.assertEqual(expected_out, fake_out.getvalue())
def test_config_commands_list_param(self):
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
CONF.set_override('host', 'fake')
expected_out = 'host = fake\n'
config_cmds = cinder_manage.ConfigCommands()
config_cmds.list(param='host')
self.assertEqual(expected_out, fake_out.getvalue())
def test_get_log_commands_no_errors(self):
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
CONF.set_override('log_dir', None)
expected_out = 'No errors in logfiles!\n'
get_log_cmds = cinder_manage.GetLogCommands()
get_log_cmds.errors()
out_lines = fake_out.getvalue().splitlines(True)
self.assertTrue(out_lines[0].startswith('DEPRECATED'))
self.assertEqual(expected_out, out_lines[1])
@mock.patch('six.moves.builtins.open')
@mock.patch('os.listdir')
def test_get_log_commands_errors(self, listdir, open):
CONF.set_override('log_dir', 'fake-dir')
listdir.return_value = ['fake-error.log']
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
open.return_value = six.StringIO(
'[ ERROR ] fake-error-message')
expected_out = ['fake-dir/fake-error.log:-\n',
'Line 1 : [ ERROR ] fake-error-message\n']
get_log_cmds = cinder_manage.GetLogCommands()
get_log_cmds.errors()
out_lines = fake_out.getvalue().splitlines(True)
self.assertTrue(out_lines[0].startswith('DEPRECATED'))
self.assertEqual(expected_out[0], out_lines[1])
self.assertEqual(expected_out[1], out_lines[2])
open.assert_called_once_with('fake-dir/fake-error.log', 'r')
listdir.assert_called_once_with(CONF.log_dir)
@mock.patch('six.moves.builtins.open')
@mock.patch('os.path.exists')
def test_get_log_commands_syslog_no_log_file(self, path_exists, open):
path_exists.return_value = False
get_log_cmds = cinder_manage.GetLogCommands()
with mock.patch('sys.stdout', new=six.StringIO()):
exit = self.assertRaises(SystemExit, get_log_cmds.syslog)
self.assertEqual(1, exit.code)
path_exists.assert_any_call('/var/log/syslog')
path_exists.assert_any_call('/var/log/messages')
@mock.patch('cinder.db.backup_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_backup_commands_list(self, get_admin_context, backup_get_all):
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_context.return_value = ctxt
backup = {'id': fake.BACKUP_ID,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'host': 'fake-host',
'display_name': 'fake-display-name',
'container': 'fake-container',
'status': fields.BackupStatus.AVAILABLE,
'size': 123,
'object_count': 1,
'volume_id': fake.VOLUME_ID,
'backup_metadata': {},
}
backup_get_all.return_value = [backup]
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
hdr = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s'
'\t%-12s')
header = hdr % ('ID',
'User ID',
'Project ID',
'Host',
'Name',
'Container',
'Status',
'Size',
'Object Count')
res = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d'
'\t%-12s')
resource = res % (backup['id'],
backup['user_id'],
backup['project_id'],
backup['host'],
backup['display_name'],
backup['container'],
backup['status'],
backup['size'],
1)
expected_out = header + '\n' + resource + '\n'
backup_cmds = cinder_manage.BackupCommands()
backup_cmds.list()
get_admin_context.assert_called_once_with()
backup_get_all.assert_called_once_with(ctxt, None, None, None,
None, None, None)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.db.backup_update')
@mock.patch('cinder.db.backup_get_all_by_host')
@mock.patch('cinder.context.get_admin_context')
def test_update_backup_host(self, get_admin_context,
backup_get_by_host,
backup_update):
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_context.return_value = ctxt
backup = {'id': fake.BACKUP_ID,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'host': 'fake-host',
'display_name': 'fake-display-name',
'container': 'fake-container',
'status': fields.BackupStatus.AVAILABLE,
'size': 123,
'object_count': 1,
'volume_id': fake.VOLUME_ID,
'backup_metadata': {},
}
backup_get_by_host.return_value = [backup]
backup_cmds = cinder_manage.BackupCommands()
backup_cmds.update_backup_host('fake_host', 'fake_host2')
get_admin_context.assert_called_once_with()
backup_get_by_host.assert_called_once_with(ctxt, 'fake_host')
backup_update.assert_called_once_with(ctxt, fake.BACKUP_ID,
{'host': 'fake_host2'})
@mock.patch('cinder.db.consistencygroup_update')
@mock.patch('cinder.db.consistencygroup_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_update_consisgroup_host(self, get_admin_context,
consisgroup_get_all,
consisgroup_update):
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_context.return_value = ctxt
consisgroup = {'id': fake.CONSISTENCY_GROUP_ID,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'host': 'fake-host',
'status': fields.ConsistencyGroupStatus.AVAILABLE
}
consisgroup_get_all.return_value = [consisgroup]
consisgrup_cmds = cinder_manage.ConsistencyGroupCommands()
consisgrup_cmds.update_cg_host('fake_host', 'fake_host2')
get_admin_context.assert_called_once_with()
consisgroup_get_all.assert_called_once_with(
ctxt, filters={'host': 'fake_host'}, limit=None, marker=None,
offset=None, sort_dirs=None, sort_keys=None)
consisgroup_update.assert_called_once_with(
ctxt, fake.CONSISTENCY_GROUP_ID, {'host': 'fake_host2'})
@mock.patch('cinder.objects.service.Service.is_up',
new_callable=mock.PropertyMock)
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.context.get_admin_context')
def _test_service_commands_list(self, service, get_admin_context,
service_get_all, service_is_up):
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_context.return_value = ctxt
service_get_all.return_value = [service]
service_is_up.return_value = True
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s"
print_format = format % ('Binary',
'Host',
'Zone',
'Status',
'State',
'Updated At',
'RPC Version',
'Object Version',
'Cluster')
rpc_version = service['rpc_current_version']
object_version = service['object_current_version']
cluster = service.get('cluster_name', '')
service_format = format % (service['binary'],
service['host'],
service['availability_zone'],
'enabled',
':-)',
service['updated_at'],
rpc_version,
object_version,
cluster)
expected_out = print_format + '\n' + service_format + '\n'
service_cmds = cinder_manage.ServiceCommands()
service_cmds.list()
self.assertEqual(expected_out, fake_out.getvalue())
get_admin_context.assert_called_with()
service_get_all.assert_called_with(ctxt)
def test_service_commands_list(self):
service = {'binary': 'cinder-binary',
'host': 'fake-host.fake-domain',
'availability_zone': 'fake-zone',
'updated_at': '2014-06-30 11:22:33',
'disabled': False,
'rpc_current_version': '1.1',
'object_current_version': '1.1',
'cluster_name': 'my_cluster',
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
for binary in ('volume', 'scheduler', 'backup'):
service['binary'] = 'cinder-%s' % binary
self._test_service_commands_list(service)
def test_service_commands_list_no_updated_at_or_cluster(self):
service = {'binary': 'cinder-binary',
'host': 'fake-host.fake-domain',
'availability_zone': 'fake-zone',
'updated_at': None,
'disabled': False,
'rpc_current_version': '1.1',
'object_current_version': '1.1',
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
for binary in ('volume', 'scheduler', 'backup'):
service['binary'] = 'cinder-%s' % binary
self._test_service_commands_list(service)
@ddt.data(('foobar', 'foobar'), ('-foo bar', 'foo bar'),
('--foo bar', 'foo bar'), ('--foo-bar', 'foo_bar'),
('---foo-bar', '_foo_bar'))
@ddt.unpack
def test_get_arg_string(self, arg, expected):
self.assertEqual(expected, cinder_manage.get_arg_string(arg))
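# A minimal sketch (an assumption, not cinder_manage's actual implementation)
# of the mapping the ddt cases above exercise: strip up to two leading
# dashes, then turn any remaining dashes into underscores so the result can
# be used as a keyword-argument name.
#
#     def _arg_string_sketch(arg):
#         if arg.startswith('--'):
#             arg = arg[2:]
#         elif arg.startswith('-'):
#             arg = arg[1:]
#         return arg.replace('-', '_')
#
#     _arg_string_sketch('---foo-bar')  # -> '_foo_bar'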
def test_fetch_func_args(self):
@cinder_manage.args('--full-rename')
@cinder_manage.args('--different-dest', dest='my_dest')
@cinder_manage.args('current')
def my_func():
pass
expected = {'full_rename': mock.sentinel.full_rename,
'my_dest': mock.sentinel.my_dest,
'current': mock.sentinel.current}
with mock.patch.object(cinder_manage, 'CONF') as mock_conf:
mock_conf.category = mock.Mock(**expected)
self.assertDictEqual(expected,
cinder_manage.fetch_func_args(my_func))
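# The expectations above imply how fetch_func_args resolves values: each
# @args() declaration maps to an attribute on CONF.category, named by the
# explicit dest= when given ('--different-dest' -> 'my_dest') or otherwise by
# get_arg_string() applied to the flag ('--full-rename' -> 'full_rename').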
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.db.cluster_get_all')
def tests_cluster_commands_list(self, get_all_mock, get_admin_mock):
now = timeutils.utcnow()
cluster = fake_cluster.fake_cluster_orm(num_hosts=4, num_down_hosts=2,
created_at=now,
last_heartbeat=now)
get_all_mock.return_value = [cluster]
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_mock.return_value = ctxt
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
format_ = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s"
print_format = format_ % ('Name',
'Binary',
'Status',
'State',
'Heartbeat',
'Hosts',
'Down Hosts',
'Updated At')
cluster_format = format_ % (cluster.name, cluster.binary,
'enabled', ':-)',
cluster.last_heartbeat,
cluster.num_hosts,
cluster.num_down_hosts,
None)
expected_out = print_format + '\n' + cluster_format + '\n'
cluster_cmds = cinder_manage.ClusterCommands()
cluster_cmds.list()
self.assertEqual(expected_out, fake_out.getvalue())
get_admin_mock.assert_called_with()
get_all_mock.assert_called_with(ctxt, is_up=None,
get_services=False,
services_summary=True,
read_deleted='no')
@mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True)
@mock.patch('cinder.context.get_admin_context')
def test_cluster_commands_remove_not_found(self, admin_ctxt_mock,
cluster_get_mock):
cluster_get_mock.side_effect = exception.ClusterNotFound(id=1)
cluster_commands = cinder_manage.ClusterCommands()
exit = cluster_commands.remove(False, 'abinary', 'acluster')
self.assertEqual(2, exit)
cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value,
None, name='acluster',
binary='abinary',
get_services=False)
@mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True)
@mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True)
@mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True)
@mock.patch('cinder.context.get_admin_context')
def test_cluster_commands_remove_fail_has_hosts(self, admin_ctxt_mock,
cluster_get_mock,
cluster_destroy_mock,
service_destroy_mock):
cluster = fake_cluster.fake_cluster_ovo(mock.Mock())
cluster_get_mock.return_value = cluster
cluster_destroy_mock.side_effect = exception.ClusterHasHosts(id=1)
cluster_commands = cinder_manage.ClusterCommands()
exit = cluster_commands.remove(False, 'abinary', 'acluster')
self.assertEqual(2, exit)
cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value,
None, name='acluster',
binary='abinary',
get_services=False)
cluster_destroy_mock.assert_called_once_with(
admin_ctxt_mock.return_value.elevated.return_value, cluster.id)
service_destroy_mock.assert_not_called()
@mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True)
@mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True)
@mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True)
@mock.patch('cinder.context.get_admin_context')
def test_cluster_commands_remove_success_no_hosts(self, admin_ctxt_mock,
cluster_get_mock,
cluster_destroy_mock,
service_destroy_mock):
cluster = fake_cluster.fake_cluster_orm()
cluster_get_mock.return_value = cluster
cluster_commands = cinder_manage.ClusterCommands()
exit = cluster_commands.remove(False, 'abinary', 'acluster')
self.assertIsNone(exit)
cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value,
None, name='acluster',
binary='abinary',
get_services=False)
cluster_destroy_mock.assert_called_once_with(
admin_ctxt_mock.return_value.elevated.return_value, cluster.id)
service_destroy_mock.assert_not_called()
@mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True)
@mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True)
@mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True)
@mock.patch('cinder.context.get_admin_context')
def test_cluster_commands_remove_recursive(self, admin_ctxt_mock,
cluster_get_mock,
cluster_destroy_mock,
service_destroy_mock):
cluster = fake_cluster.fake_cluster_orm()
cluster.services = [fake_service.fake_service_orm()]
cluster_get_mock.return_value = cluster
cluster_commands = cinder_manage.ClusterCommands()
exit = cluster_commands.remove(True, 'abinary', 'acluster')
self.assertIsNone(exit)
cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value,
None, name='acluster',
binary='abinary',
get_services=True)
cluster_destroy_mock.assert_called_once_with(
admin_ctxt_mock.return_value.elevated.return_value, cluster.id)
service_destroy_mock.assert_called_once_with(
admin_ctxt_mock.return_value.elevated.return_value,
cluster.services[0]['id'])
@mock.patch('cinder.db.sqlalchemy.api.volume_include_in_cluster',
auto_specs=True, return_value=1)
@mock.patch('cinder.db.sqlalchemy.api.consistencygroup_include_in_cluster',
auto_specs=True, return_value=2)
@mock.patch('cinder.context.get_admin_context')
def test_cluster_commands_rename(self, admin_ctxt_mock,
volume_include_mock, cg_include_mock):
"""Test that cluster rename changes volumes and cgs."""
current_cluster_name = mock.sentinel.old_cluster_name
new_cluster_name = mock.sentinel.new_cluster_name
partial = mock.sentinel.partial
cluster_commands = cinder_manage.ClusterCommands()
exit = cluster_commands.rename(partial, current_cluster_name,
new_cluster_name)
self.assertIsNone(exit)
volume_include_mock.assert_called_once_with(
admin_ctxt_mock.return_value, new_cluster_name, partial,
cluster_name=current_cluster_name)
cg_include_mock.assert_called_once_with(
admin_ctxt_mock.return_value, new_cluster_name, partial,
cluster_name=current_cluster_name)
@mock.patch('cinder.db.sqlalchemy.api.volume_include_in_cluster',
auto_specs=True, return_value=0)
@mock.patch('cinder.db.sqlalchemy.api.consistencygroup_include_in_cluster',
auto_specs=True, return_value=0)
@mock.patch('cinder.context.get_admin_context')
def test_cluster_commands_rename_no_changes(self, admin_ctxt_mock,
volume_include_mock,
cg_include_mock):
"""Test that we return an error when cluster rename has no effect."""
cluster_commands = cinder_manage.ClusterCommands()
exit = cluster_commands.rename(False, 'cluster', 'new_cluster')
self.assertEqual(2, exit)
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_argv_lt_2(self, register_cli_opt):
script_name = 'cinder-manage'
sys.argv = [script_name]
CONF(sys.argv[1:], project='cinder', version=version.version_string())
with mock.patch('sys.stdout', new=six.StringIO()):
exit = self.assertRaises(SystemExit, cinder_manage.main)
self.assertTrue(register_cli_opt.called)
self.assertEqual(2, exit.code)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_log.log.setup')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_sudo_failed(self, register_cli_opt, log_setup,
config_opts_call):
script_name = 'cinder-manage'
sys.argv = [script_name, 'fake_category', 'fake_action']
config_opts_call.side_effect = cfg.ConfigFilesNotFoundError(
mock.sentinel._namespace)
with mock.patch('sys.stdout', new=six.StringIO()):
exit = self.assertRaises(SystemExit, cinder_manage.main)
self.assertTrue(register_cli_opt.called)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='cinder',
version=version.version_string())
self.assertFalse(log_setup.called)
self.assertEqual(2, exit.code)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main(self, register_cli_opt, config_opts_call):
script_name = 'cinder-manage'
sys.argv = [script_name, 'config', 'list']
action_fn = mock.MagicMock()
CONF.category = mock.MagicMock(action_fn=action_fn)
cinder_manage.main()
self.assertTrue(register_cli_opt.called)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='cinder', version=version.version_string())
self.assertTrue(action_fn.called)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_log.log.setup')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_invalid_dir(self, register_cli_opt, log_setup,
config_opts_call):
script_name = 'cinder-manage'
fake_dir = 'fake-dir'
invalid_dir = 'Invalid directory:'
sys.argv = [script_name, '--config-dir', fake_dir]
config_opts_call.side_effect = cfg.ConfigDirNotFoundError(fake_dir)
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
exit = self.assertRaises(SystemExit, cinder_manage.main)
self.assertTrue(register_cli_opt.called)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='cinder',
version=version.version_string())
self.assertIn(invalid_dir, fake_out.getvalue())
self.assertIn(fake_dir, fake_out.getvalue())
self.assertFalse(log_setup.called)
self.assertEqual(2, exit.code)
@mock.patch('cinder.db')
def test_remove_service_failure(self, mock_db):
mock_db.service_destroy.side_effect = SystemExit(1)
service_commands = cinder_manage.ServiceCommands()
exit = service_commands.remove('abinary', 'ahost')
self.assertEqual(2, exit)
@mock.patch('cinder.db.service_destroy')
@mock.patch(
'cinder.db.service_get',
return_value={'id': '12',
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'})
def test_remove_service_success(self, mock_get_by_args,
mock_service_destroy):
service_commands = cinder_manage.ServiceCommands()
self.assertIsNone(service_commands.remove('abinary', 'ahost'))
class TestCinderRtstoolCmd(test.TestCase):
def setUp(self):
super(TestCinderRtstoolCmd, self).setUp()
sys.argv = ['cinder-rtstool']
self.INITIATOR_IQN = 'iqn.2015.12.com.example.openstack.i:UNIT1'
self.TARGET_IQN = 'iqn.2015.12.com.example.openstack.i:TARGET1'
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_create_rtslib_error(self, rtsroot):
rtsroot.side_effect = rtslib_fb.utils.RTSLibError()
with mock.patch('sys.stdout', new=six.StringIO()):
self.assertRaises(rtslib_fb.utils.RTSLibError,
cinder_rtstool.create,
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled)
def _test_create_rtslib_error_network_portal(self, ip):
with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \
mock.patch.object(rtslib_fb, 'LUN') as lun, \
mock.patch.object(rtslib_fb, 'TPG') as tpg, \
mock.patch.object(rtslib_fb, 'FabricModule') as fabric_module, \
mock.patch.object(rtslib_fb, 'Target') as target, \
mock.patch.object(rtslib_fb, 'BlockStorageObject') as \
block_storage_object, \
mock.patch.object(rtslib_fb.root, 'RTSRoot') as rts_root:
root_new = mock.MagicMock(storage_objects=mock.MagicMock())
rts_root.return_value = root_new
block_storage_object.return_value = mock.sentinel.so_new
target.return_value = mock.sentinel.target_new
fabric_module.return_value = mock.sentinel.fabric_new
tpg_new = tpg.return_value
lun.return_value = mock.sentinel.lun_new
if ip == '0.0.0.0':
network_portal.side_effect = rtslib_fb.utils.RTSLibError()
self.assertRaises(rtslib_fb.utils.RTSLibError,
cinder_rtstool.create,
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled)
else:
cinder_rtstool.create(mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled)
rts_root.assert_called_once_with()
block_storage_object.assert_called_once_with(
name=mock.sentinel.name, dev=mock.sentinel.backing_device)
target.assert_called_once_with(mock.sentinel.fabric_new,
mock.sentinel.name, 'create')
fabric_module.assert_called_once_with('iscsi')
tpg.assert_called_once_with(mock.sentinel.target_new,
mode='create')
tpg_new.set_attribute.assert_called_once_with('authentication',
'1')
lun.assert_called_once_with(tpg_new,
storage_object=mock.sentinel.so_new)
self.assertEqual(1, tpg_new.enable)
if ip == '::0':
ip = '[::0]'
network_portal.assert_any_call(tpg_new, ip, 3260, mode='any')
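# The assertion above expects the IPv6 wildcard ('::0') in bracketed form
# ('[::0]'), i.e. cinder_rtstool is expected to wrap IPv6 literals before
# creating the network portal.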
def test_create_rtslib_error_network_portal_ipv4(self):
with mock.patch('sys.stdout', new=six.StringIO()):
self._test_create_rtslib_error_network_portal('0.0.0.0')
def test_create_rtslib_error_network_portal_ipv6(self):
with mock.patch('sys.stdout', new=six.StringIO()):
self._test_create_rtslib_error_network_portal('::0')
def _test_create(self, ip):
with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \
mock.patch.object(rtslib_fb, 'LUN') as lun, \
mock.patch.object(rtslib_fb, 'TPG') as tpg, \
mock.patch.object(rtslib_fb, 'FabricModule') as fabric_module, \
mock.patch.object(rtslib_fb, 'Target') as target, \
mock.patch.object(rtslib_fb, 'BlockStorageObject') as \
block_storage_object, \
mock.patch.object(rtslib_fb.root, 'RTSRoot') as rts_root:
root_new = mock.MagicMock(storage_objects=mock.MagicMock())
rts_root.return_value = root_new
block_storage_object.return_value = mock.sentinel.so_new
target.return_value = mock.sentinel.target_new
fabric_module.return_value = mock.sentinel.fabric_new
tpg_new = tpg.return_value
lun.return_value = mock.sentinel.lun_new
cinder_rtstool.create(mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled)
rts_root.assert_called_once_with()
block_storage_object.assert_called_once_with(
name=mock.sentinel.name, dev=mock.sentinel.backing_device)
target.assert_called_once_with(mock.sentinel.fabric_new,
mock.sentinel.name, 'create')
fabric_module.assert_called_once_with('iscsi')
tpg.assert_called_once_with(mock.sentinel.target_new,
mode='create')
tpg_new.set_attribute.assert_called_once_with('authentication',
'1')
lun.assert_called_once_with(tpg_new,
storage_object=mock.sentinel.so_new)
self.assertEqual(1, tpg_new.enable)
if ip == '::0':
ip = '[::0]'
network_portal.assert_any_call(tpg_new, ip, 3260, mode='any')
def test_create_ipv4(self):
self._test_create('0.0.0.0')
def test_create_ipv6(self):
self._test_create('::0')
def _test_create_ips_and_port(self, mock_rtslib, port, ips, expected_ips):
mock_rtslib.BlockStorageObject.return_value = mock.sentinel.bso
mock_rtslib.Target.return_value = mock.sentinel.target_new
mock_rtslib.FabricModule.return_value = mock.sentinel.iscsi_fabric
tpg_new = mock_rtslib.TPG.return_value
cinder_rtstool.create(mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
portals_ips=ips,
portals_port=port)
mock_rtslib.Target.assert_called_once_with(mock.sentinel.iscsi_fabric,
mock.sentinel.name,
'create')
mock_rtslib.TPG.assert_called_once_with(mock.sentinel.target_new,
mode='create')
mock_rtslib.LUN.assert_called_once_with(
tpg_new,
storage_object=mock.sentinel.bso)
mock_rtslib.NetworkPortal.assert_has_calls(
map(lambda ip: mock.call(tpg_new, ip, port, mode='any'),
expected_ips), any_order=True
)
@mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True)
def test_create_ips_and_port_ipv4(self, mock_rtslib):
ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
port = 3261
self._test_create_ips_and_port(mock_rtslib, port, ips, ips)
@mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True)
def test_create_ips_and_port_ipv6(self, mock_rtslib):
ips = ['fe80::fc16:3eff:fecb:ad2f']
expected_ips = ['[fe80::fc16:3eff:fecb:ad2f]']
port = 3261
self._test_create_ips_and_port(mock_rtslib, port, ips,
expected_ips)
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_add_initiator_rtslib_error(self, rtsroot):
rtsroot.side_effect = rtslib_fb.utils.RTSLibError()
with mock.patch('sys.stdout', new=six.StringIO()):
self.assertRaises(rtslib_fb.utils.RTSLibError,
cinder_rtstool.add_initiator,
mock.sentinel.target_iqn,
self.INITIATOR_IQN,
mock.sentinel.userid,
mock.sentinel.password)
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_add_initiator_rtstool_error(self, rtsroot):
rtsroot.targets.return_value = {}
self.assertRaises(cinder_rtstool.RtstoolError,
cinder_rtstool.add_initiator,
mock.sentinel.target_iqn,
self.INITIATOR_IQN,
mock.sentinel.userid,
mock.sentinel.password)
@mock.patch.object(rtslib_fb, 'MappedLUN')
@mock.patch.object(rtslib_fb, 'NodeACL')
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_add_initiator_acl_exists(self, rtsroot, node_acl, mapped_lun):
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': self.INITIATOR_IQN}]
acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN)
tpg = mock.MagicMock(node_acls=[acl])
tpgs = iter([tpg])
target = mock.MagicMock(tpgs=tpgs, wwn=self.TARGET_IQN)
rtsroot.return_value = mock.MagicMock(targets=[target])
cinder_rtstool.add_initiator(self.TARGET_IQN,
self.INITIATOR_IQN,
mock.sentinel.userid,
mock.sentinel.password)
self.assertFalse(node_acl.called)
self.assertFalse(mapped_lun.called)
@mock.patch.object(rtslib_fb, 'MappedLUN')
@mock.patch.object(rtslib_fb, 'NodeACL')
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_add_initiator_acl_exists_case_1(self,
rtsroot,
node_acl,
mapped_lun):
"""Ensure initiator iqns are handled in a case-insensitive manner."""
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': self.INITIATOR_IQN.lower()}]
acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN)
tpg = mock.MagicMock(node_acls=[acl])
tpgs = iter([tpg])
target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn)
rtsroot.return_value = mock.MagicMock(targets=[target])
cinder_rtstool.add_initiator(target_iqn,
self.INITIATOR_IQN,
mock.sentinel.userid,
mock.sentinel.password)
self.assertFalse(node_acl.called)
self.assertFalse(mapped_lun.called)
@mock.patch.object(rtslib_fb, 'MappedLUN')
@mock.patch.object(rtslib_fb, 'NodeACL')
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_add_initiator_acl_exists_case_2(self,
rtsroot,
node_acl,
mapped_lun):
"""Ensure initiator iqns are handled in a case-insensitive manner."""
iqn_lower = self.INITIATOR_IQN.lower()
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': self.INITIATOR_IQN}]
acl = mock.MagicMock(node_wwn=iqn_lower)
tpg = mock.MagicMock(node_acls=[acl])
tpgs = iter([tpg])
target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn)
rtsroot.return_value = mock.MagicMock(targets=[target])
cinder_rtstool.add_initiator(target_iqn,
self.INITIATOR_IQN,
mock.sentinel.userid,
mock.sentinel.password)
self.assertFalse(node_acl.called)
self.assertFalse(mapped_lun.called)
@mock.patch.object(rtslib_fb, 'MappedLUN')
@mock.patch.object(rtslib_fb, 'NodeACL')
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_add_initiator(self, rtsroot, node_acl, mapped_lun):
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': self.INITIATOR_IQN}]
tpg = mock.MagicMock()
tpgs = iter([tpg])
target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn)
rtsroot.return_value = mock.MagicMock(targets=[target])
acl_new = mock.MagicMock(chap_userid=mock.sentinel.userid,
chap_password=mock.sentinel.password)
node_acl.return_value = acl_new
cinder_rtstool.add_initiator(target_iqn,
self.INITIATOR_IQN,
mock.sentinel.userid,
mock.sentinel.password)
node_acl.assert_called_once_with(tpg,
self.INITIATOR_IQN,
mode='create')
mapped_lun.assert_called_once_with(acl_new, 0, tpg_lun=0)
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_get_targets(self, rtsroot):
target = mock.MagicMock()
target.dump.return_value = {'wwn': 'fake-wwn'}
rtsroot.return_value = mock.MagicMock(targets=[target])
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
cinder_rtstool.get_targets()
self.assertEqual(str(target.wwn), fake_out.getvalue().strip())
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_delete(self, rtsroot):
target = mock.MagicMock(wwn=mock.sentinel.iqn)
storage_object = mock.MagicMock()
name = mock.PropertyMock(return_value=mock.sentinel.iqn)
type(storage_object).name = name
rtsroot.return_value = mock.MagicMock(
targets=[target], storage_objects=[storage_object])
cinder_rtstool.delete(mock.sentinel.iqn)
target.delete.assert_called_once_with()
storage_object.delete.assert_called_once_with()
@mock.patch.object(rtslib_fb, 'MappedLUN')
@mock.patch.object(rtslib_fb, 'NodeACL')
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_delete_initiator(self, rtsroot, node_acl, mapped_lun):
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': self.INITIATOR_IQN}]
acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN)
tpg = mock.MagicMock(node_acls=[acl])
tpgs = iter([tpg])
target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn)
rtsroot.return_value = mock.MagicMock(targets=[target])
cinder_rtstool.delete_initiator(target_iqn,
self.INITIATOR_IQN)
@mock.patch.object(rtslib_fb, 'MappedLUN')
@mock.patch.object(rtslib_fb, 'NodeACL')
@mock.patch.object(rtslib_fb.root, 'RTSRoot')
def test_delete_initiator_case(self, rtsroot, node_acl, mapped_lun):
"""Ensure iqns are handled in a case-insensitive manner."""
initiator_iqn_lower = self.INITIATOR_IQN.lower()
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': initiator_iqn_lower}]
acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN)
tpg = mock.MagicMock(node_acls=[acl])
tpgs = iter([tpg])
target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn)
rtsroot.return_value = mock.MagicMock(targets=[target])
cinder_rtstool.delete_initiator(target_iqn,
self.INITIATOR_IQN)
@mock.patch.object(cinder_rtstool, 'os', autospec=True)
@mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True)
def test_save_with_filename(self, mock_rtslib, mock_os):
filename = mock.sentinel.filename
cinder_rtstool.save_to_file(filename)
rtsroot = mock_rtslib.root.RTSRoot
rtsroot.assert_called_once_with()
self.assertEqual(0, mock_os.path.dirname.call_count)
self.assertEqual(0, mock_os.path.exists.call_count)
self.assertEqual(0, mock_os.makedirs.call_count)
rtsroot.return_value.save_to_file.assert_called_once_with(filename)
@mock.patch.object(cinder_rtstool, 'os',
**{'path.exists.return_value': True,
'path.dirname.return_value': mock.sentinel.dirname})
@mock.patch.object(cinder_rtstool, 'rtslib_fb',
**{'root.default_save_file': mock.sentinel.filename})
def test_save(self, mock_rtslib, mock_os):
"""Test that we check path exists with default file."""
cinder_rtstool.save_to_file(None)
rtsroot = mock_rtslib.root.RTSRoot
rtsroot.assert_called_once_with()
rtsroot.return_value.save_to_file.assert_called_once_with(
mock.sentinel.filename)
mock_os.path.dirname.assert_called_once_with(mock.sentinel.filename)
mock_os.path.exists.assert_called_once_with(mock.sentinel.dirname)
self.assertEqual(0, mock_os.makedirs.call_count)
@mock.patch.object(cinder_rtstool, 'os',
**{'path.exists.return_value': False,
'path.dirname.return_value': mock.sentinel.dirname})
@mock.patch.object(cinder_rtstool, 'rtslib_fb',
**{'root.default_save_file': mock.sentinel.filename})
def test_save_no_targetcli(self, mock_rtslib, mock_os):
"""Test that we create path if it doesn't exist with default file."""
cinder_rtstool.save_to_file(None)
rtsroot = mock_rtslib.root.RTSRoot
rtsroot.assert_called_once_with()
rtsroot.return_value.save_to_file.assert_called_once_with(
mock.sentinel.filename)
mock_os.path.dirname.assert_called_once_with(mock.sentinel.filename)
mock_os.path.exists.assert_called_once_with(mock.sentinel.dirname)
mock_os.makedirs.assert_called_once_with(mock.sentinel.dirname, 0o755)
@mock.patch.object(cinder_rtstool, 'os', autospec=True)
@mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True)
def test_save_error_creating_dir(self, mock_rtslib, mock_os):
mock_os.path.dirname.return_value = 'dirname'
mock_os.path.exists.return_value = False
mock_os.makedirs.side_effect = OSError('error')
regexp = (r'targetcli not installed and could not create default '
r'directory \(dirname\): error$')
self.assertRaisesRegexp(cinder_rtstool.RtstoolError, regexp,
cinder_rtstool.save_to_file, None)
@mock.patch.object(cinder_rtstool, 'os', autospec=True)
@mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True)
def test_save_error_saving(self, mock_rtslib, mock_os):
save = mock_rtslib.root.RTSRoot.return_value.save_to_file
save.side_effect = OSError('error')
regexp = r'Could not save configuration to myfile: error'
self.assertRaisesRegexp(cinder_rtstool.RtstoolError, regexp,
cinder_rtstool.save_to_file, 'myfile')
@mock.patch.object(cinder_rtstool, 'rtslib_fb',
**{'root.default_save_file': mock.sentinel.filename})
def test_restore(self, mock_rtslib):
"""Test that we restore target configuration with default file."""
cinder_rtstool.restore_from_file(None)
rtsroot = mock_rtslib.root.RTSRoot
rtsroot.assert_called_once_with()
rtsroot.return_value.restore_from_file.assert_called_once_with(
mock.sentinel.filename)
@mock.patch.object(cinder_rtstool, 'rtslib_fb')
def test_restore_with_file(self, mock_rtslib):
"""Test that we restore target configuration with specified file."""
cinder_rtstool.restore_from_file('saved_file')
rtsroot = mock_rtslib.root.RTSRoot
rtsroot.return_value.restore_from_file.assert_called_once_with(
'saved_file')
@mock.patch('cinder.cmd.rtstool.restore_from_file')
def test_restore_error(self, restore_from_file):
"""Test that we fail to restore target configuration."""
restore_from_file.side_effect = OSError
self.assertRaises(OSError,
cinder_rtstool.restore_from_file,
mock.sentinel.filename)
def test_usage(self):
with mock.patch('sys.stdout', new=six.StringIO()):
exit = self.assertRaises(SystemExit, cinder_rtstool.usage)
self.assertEqual(1, exit.code)
@mock.patch('cinder.cmd.rtstool.usage')
def test_main_argc_lt_2(self, usage):
usage.side_effect = SystemExit(1)
sys.argv = ['cinder-rtstool']
exit = self.assertRaises(SystemExit, cinder_rtstool.main)
self.assertTrue(usage.called)
self.assertEqual(1, exit.code)
def test_main_create_argv_lt_6(self):
sys.argv = ['cinder-rtstool', 'create']
self._test_main_check_argv()
def test_main_create_argv_gt_7(self):
sys.argv = ['cinder-rtstool', 'create', 'fake-arg1', 'fake-arg2',
'fake-arg3', 'fake-arg4', 'fake-arg5', 'fake-arg6']
self._test_main_check_argv()
def test_main_add_initiator_argv_lt_6(self):
sys.argv = ['cinder-rtstool', 'add-initiator']
self._test_main_check_argv()
def test_main_delete_argv_lt_3(self):
sys.argv = ['cinder-rtstool', 'delete']
self._test_main_check_argv()
def test_main_no_action(self):
sys.argv = ['cinder-rtstool']
self._test_main_check_argv()
def _test_main_check_argv(self):
with mock.patch('cinder.cmd.rtstool.usage') as usage:
usage.side_effect = SystemExit(1)
sys.argv = ['cinder-rtstool', 'create']
exit = self.assertRaises(SystemExit, cinder_rtstool.main)
self.assertTrue(usage.called)
self.assertEqual(1, exit.code)
@mock.patch('cinder.cmd.rtstool.save_to_file')
def test_main_save(self, mock_save):
sys.argv = ['cinder-rtstool',
'save']
rc = cinder_rtstool.main()
mock_save.assert_called_once_with(None)
self.assertEqual(0, rc)
@mock.patch('cinder.cmd.rtstool.save_to_file')
def test_main_save_with_file(self, mock_save):
sys.argv = ['cinder-rtstool',
'save',
mock.sentinel.filename]
rc = cinder_rtstool.main()
mock_save.assert_called_once_with(mock.sentinel.filename)
self.assertEqual(0, rc)
def test_main_create(self):
with mock.patch('cinder.cmd.rtstool.create') as create:
sys.argv = ['cinder-rtstool',
'create',
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
str(mock.sentinel.initiator_iqns)]
rc = cinder_rtstool.main()
create.assert_called_once_with(
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
initiator_iqns=str(mock.sentinel.initiator_iqns))
self.assertEqual(0, rc)
@mock.patch('cinder.cmd.rtstool.create')
def test_main_create_ips_and_port(self, mock_create):
sys.argv = ['cinder-rtstool',
'create',
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
str(mock.sentinel.initiator_iqns),
'-p3261',
'-aip1,ip2,ip3']
rc = cinder_rtstool.main()
mock_create.assert_called_once_with(
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
initiator_iqns=str(mock.sentinel.initiator_iqns),
portals_ips=['ip1', 'ip2', 'ip3'],
portals_port=3261)
self.assertEqual(0, rc)
def test_main_add_initiator(self):
with mock.patch('cinder.cmd.rtstool.add_initiator') as add_initiator:
sys.argv = ['cinder-rtstool',
'add-initiator',
mock.sentinel.target_iqn,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.initiator_iqns]
rc = cinder_rtstool.main()
add_initiator.assert_called_once_with(
mock.sentinel.target_iqn, mock.sentinel.initiator_iqns,
mock.sentinel.userid, mock.sentinel.password)
self.assertEqual(0, rc)
def test_main_get_targets(self):
with mock.patch('cinder.cmd.rtstool.get_targets') as get_targets:
sys.argv = ['cinder-rtstool', 'get-targets']
rc = cinder_rtstool.main()
get_targets.assert_called_once_with()
self.assertEqual(0, rc)
def test_main_delete(self):
with mock.patch('cinder.cmd.rtstool.delete') as delete:
sys.argv = ['cinder-rtstool', 'delete', mock.sentinel.iqn]
rc = cinder_rtstool.main()
delete.assert_called_once_with(mock.sentinel.iqn)
self.assertEqual(0, rc)
@mock.patch.object(cinder_rtstool, 'verify_rtslib')
def test_main_verify(self, mock_verify_rtslib):
sys.argv = ['cinder-rtstool', 'verify']
rc = cinder_rtstool.main()
mock_verify_rtslib.assert_called_once_with()
self.assertEqual(0, rc)
class TestCinderVolumeUsageAuditCmd(test.TestCase):
def setUp(self):
super(TestCinderVolumeUsageAuditCmd, self).setUp()
sys.argv = ['cinder-volume-usage-audit']
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_time_error(self, get_admin_context, log_setup, get_logger,
version_string, rpc_init,
last_completed_audit_period):
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2013-01-01 01:00:00')
last_completed_audit_period.return_value = (mock.sentinel.begin,
mock.sentinel.end)
exit = self.assertRaises(SystemExit, volume_usage_audit.main)
get_admin_context.assert_called_once_with()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
self.assertEqual(-1, exit.code)
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_create_volume_error(self, get_admin_context, log_setup,
get_logger, version_string,
rpc_init,
last_completed_audit_period,
volume_get_all_active_by_window,
notify_about_volume_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC)
end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC)
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
volume1_created = datetime.datetime(2014, 1, 1, 2, 0,
tzinfo=iso8601.UTC)
volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0,
tzinfo=iso8601.UTC)
volume1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID,
created_at=volume1_created,
deleted_at=volume1_deleted)
volume_get_all_active_by_window.return_value = [volume1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info = {
'audit_period_beginning': str(volume1.created_at),
'audit_period_ending': str(volume1.created_at),
}
def _notify_about_volume_usage(*args, **kwargs):
if 'create.end' in args:
raise Exception()
else:
pass
notify_about_volume_usage.side_effect = _notify_about_volume_usage
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_all_active_by_window.assert_called_once_with(ctxt, begin,
end)
notify_about_volume_usage.assert_has_calls([
mock.call(ctxt, volume1, 'exists', extra_usage_info=extra_info),
mock.call(ctxt, volume1, 'create.start',
extra_usage_info=local_extra_info),
mock.call(ctxt, volume1, 'create.end',
extra_usage_info=local_extra_info)
])
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_delete_volume_error(self, get_admin_context, log_setup,
get_logger, version_string,
rpc_init,
last_completed_audit_period,
volume_get_all_active_by_window,
notify_about_volume_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC)
end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC)
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
volume1_created = datetime.datetime(2014, 1, 1, 2, 0,
tzinfo=iso8601.UTC)
volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0,
tzinfo=iso8601.UTC)
volume1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID,
created_at=volume1_created,
deleted_at=volume1_deleted)
volume_get_all_active_by_window.return_value = [volume1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info_create = {
'audit_period_beginning': str(volume1.created_at),
'audit_period_ending': str(volume1.created_at),
}
local_extra_info_delete = {
'audit_period_beginning': str(volume1.deleted_at),
'audit_period_ending': str(volume1.deleted_at),
}
def _notify_about_volume_usage(*args, **kwargs):
if 'delete.end' in args:
raise Exception()
else:
pass
notify_about_volume_usage.side_effect = _notify_about_volume_usage
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_all_active_by_window.assert_called_once_with(ctxt, begin,
end)
notify_about_volume_usage.assert_has_calls([
mock.call(ctxt, volume1, 'exists', extra_usage_info=extra_info),
mock.call(ctxt, volume1, 'create.start',
extra_usage_info=local_extra_info_create),
mock.call(ctxt, volume1, 'create.end',
extra_usage_info=local_extra_info_create),
mock.call(ctxt, volume1, 'delete.start',
extra_usage_info=local_extra_info_delete),
mock.call(ctxt, volume1, 'delete.end',
extra_usage_info=local_extra_info_delete)
])
@mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
@mock.patch('cinder.objects.snapshot.SnapshotList.'
'get_all_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_snapshot_error(self, get_admin_context,
log_setup, get_logger,
version_string, rpc_init,
last_completed_audit_period,
volume_get_all_active_by_window,
notify_about_volume_usage,
snapshot_get_all_active_by_window,
notify_about_snapshot_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC)
end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC)
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0,
tzinfo=iso8601.UTC)
snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0,
tzinfo=iso8601.UTC)
snapshot1 = mock.MagicMock(id=fake.VOLUME_ID,
project_id=fake.PROJECT_ID,
created_at=snapshot1_created,
deleted_at=snapshot1_deleted)
volume_get_all_active_by_window.return_value = []
snapshot_get_all_active_by_window.return_value = [snapshot1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info_create = {
'audit_period_beginning': str(snapshot1.created_at),
'audit_period_ending': str(snapshot1.created_at),
}
local_extra_info_delete = {
'audit_period_beginning': str(snapshot1.deleted_at),
'audit_period_ending': str(snapshot1.deleted_at),
}
def _notify_about_snapshot_usage(*args, **kwargs):
# notify_about_snapshot_usage raises an exception, but does not
# block
raise Exception()
notify_about_snapshot_usage.side_effect = _notify_about_snapshot_usage
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_all_active_by_window.assert_called_once_with(ctxt, begin,
end)
self.assertFalse(notify_about_volume_usage.called)
notify_about_snapshot_usage.assert_has_calls([
mock.call(ctxt, snapshot1, 'exists', extra_info),
mock.call(ctxt, snapshot1, 'create.start',
extra_usage_info=local_extra_info_create),
mock.call(ctxt, snapshot1, 'delete.start',
extra_usage_info=local_extra_info_delete)
])
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
@mock.patch('cinder.objects.backup.BackupList.get_all_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_backup_error(self, get_admin_context,
version_string, rpc_init,
last_completed_audit_period,
volume_get_all_active_by_window,
notify_about_volume_usage,
backup_get_all_active_by_window,
notify_about_backup_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC)
end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC)
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
backup1_created = datetime.datetime(2014, 1, 1, 2, 0,
tzinfo=iso8601.UTC)
backup1_deleted = datetime.datetime(2014, 1, 1, 3, 0,
tzinfo=iso8601.UTC)
backup1 = mock.MagicMock(id=fake.BACKUP_ID,
project_id=fake.PROJECT_ID,
created_at=backup1_created,
deleted_at=backup1_deleted)
volume_get_all_active_by_window.return_value = []
backup_get_all_active_by_window.return_value = [backup1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info_create = {
'audit_period_beginning': str(backup1.created_at),
'audit_period_ending': str(backup1.created_at),
}
local_extra_info_delete = {
'audit_period_beginning': str(backup1.deleted_at),
'audit_period_ending': str(backup1.deleted_at),
}
notify_about_backup_usage.side_effect = Exception()
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_all_active_by_window.assert_called_once_with(ctxt,
begin, end)
self.assertFalse(notify_about_volume_usage.called)
notify_about_backup_usage.assert_any_call(ctxt, backup1, 'exists',
extra_info)
notify_about_backup_usage.assert_any_call(
ctxt, backup1, 'create.start',
extra_usage_info=local_extra_info_create)
notify_about_backup_usage.assert_any_call(
ctxt, backup1, 'delete.start',
extra_usage_info=local_extra_info_delete)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
@mock.patch('cinder.objects.backup.BackupList.get_all_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
@mock.patch('cinder.objects.snapshot.SnapshotList.'
'get_all_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main(self, get_admin_context, log_setup, get_logger,
version_string, rpc_init, last_completed_audit_period,
volume_get_all_active_by_window, notify_about_volume_usage,
snapshot_get_all_active_by_window,
notify_about_snapshot_usage, backup_get_all_active_by_window,
notify_about_backup_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC)
end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC)
ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
volume1_created = datetime.datetime(2014, 1, 1, 2, 0,
tzinfo=iso8601.UTC)
volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0,
tzinfo=iso8601.UTC)
volume1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID,
created_at=volume1_created,
deleted_at=volume1_deleted)
volume_get_all_active_by_window.return_value = [volume1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
extra_info_volume_create = {
'audit_period_beginning': str(volume1.created_at),
'audit_period_ending': str(volume1.created_at),
}
extra_info_volume_delete = {
'audit_period_beginning': str(volume1.deleted_at),
'audit_period_ending': str(volume1.deleted_at),
}
snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0,
tzinfo=iso8601.UTC)
snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0,
tzinfo=iso8601.UTC)
snapshot1 = mock.MagicMock(id=fake.VOLUME_ID,
project_id=fake.PROJECT_ID,
created_at=snapshot1_created,
deleted_at=snapshot1_deleted)
snapshot_get_all_active_by_window.return_value = [snapshot1]
extra_info_snapshot_create = {
'audit_period_beginning': str(snapshot1.created_at),
'audit_period_ending': str(snapshot1.created_at),
}
extra_info_snapshot_delete = {
'audit_period_beginning': str(snapshot1.deleted_at),
'audit_period_ending': str(snapshot1.deleted_at),
}
backup1_created = datetime.datetime(2014, 1, 1, 2, 0,
tzinfo=iso8601.UTC)
backup1_deleted = datetime.datetime(2014, 1, 1, 3, 0,
tzinfo=iso8601.UTC)
backup1 = mock.MagicMock(id=fake.BACKUP_ID,
project_id=fake.PROJECT_ID,
created_at=backup1_created,
deleted_at=backup1_deleted)
backup_get_all_active_by_window.return_value = [backup1]
extra_info_backup_create = {
'audit_period_beginning': str(backup1.created_at),
'audit_period_ending': str(backup1.created_at),
}
extra_info_backup_delete = {
'audit_period_beginning': str(backup1.deleted_at),
'audit_period_ending': str(backup1.deleted_at),
}
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual('cinder', CONF.project)
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_all_active_by_window.assert_called_once_with(ctxt,
begin, end)
notify_about_volume_usage.assert_has_calls([
mock.call(ctxt, volume1, 'exists', extra_usage_info=extra_info),
mock.call(ctxt, volume1, 'create.start',
extra_usage_info=extra_info_volume_create),
mock.call(ctxt, volume1, 'create.end',
extra_usage_info=extra_info_volume_create),
mock.call(ctxt, volume1, 'delete.start',
extra_usage_info=extra_info_volume_delete),
mock.call(ctxt, volume1, 'delete.end',
extra_usage_info=extra_info_volume_delete)
])
notify_about_snapshot_usage.assert_has_calls([
mock.call(ctxt, snapshot1, 'exists', extra_info),
mock.call(ctxt, snapshot1, 'create.start',
extra_usage_info=extra_info_snapshot_create),
mock.call(ctxt, snapshot1, 'create.end',
extra_usage_info=extra_info_snapshot_create),
mock.call(ctxt, snapshot1, 'delete.start',
extra_usage_info=extra_info_snapshot_delete),
mock.call(ctxt, snapshot1, 'delete.end',
extra_usage_info=extra_info_snapshot_delete)
])
notify_about_backup_usage.assert_has_calls([
mock.call(ctxt, backup1, 'exists', extra_info),
mock.call(ctxt, backup1, 'create.start',
extra_usage_info=extra_info_backup_create),
mock.call(ctxt, backup1, 'create.end',
extra_usage_info=extra_info_backup_create),
mock.call(ctxt, backup1, 'delete.start',
extra_usage_info=extra_info_backup_delete),
mock.call(ctxt, backup1, 'delete.end',
extra_usage_info=extra_info_backup_delete)
])
class TestVolumeSharedTargetsOnlineMigration(test.TestCase):
"""Unit tests for cinder.db.api.service_*."""
def setUp(self):
super(TestVolumeSharedTargetsOnlineMigration, self).setUp()
def _get_minimum_rpc_version_mock(ctxt, binary):
binary_map = {
'cinder-volume': rpcapi.VolumeAPI,
}
return binary_map[binary].RPC_API_VERSION
self.patch('cinder.objects.Service.get_minimum_rpc_version',
side_effect=_get_minimum_rpc_version_mock)
ctxt = context.get_admin_context()
# default value in db for shared_targets on a volume
# is True, so don't need to set it here explicitly
for i in range(3):
sqlalchemy_api.volume_create(
ctxt,
{'host': 'host1@lvm-driver1#lvm-driver1',
'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
values = {
'host': 'host1@lvm-driver1',
'binary': constants.VOLUME_BINARY,
'topic': constants.VOLUME_TOPIC,
'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
utils.create_service(ctxt, values)
self.ctxt = ctxt
@mock.patch('cinder.objects.Service.get_minimum_obj_version',
return_value='1.8')
def test_shared_targets_migrations(self, mock_version):
"""Ensure we can update the column."""
# Run the migration and verify that all 3 entries were updated
with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
return_value={'connection_protocol': 'iSCSI',
'shared_targets': False}):
total, updated = (
cinder_manage.shared_targets_online_data_migration(
self.ctxt, 10))
self.assertEqual(3, total)
self.assertEqual(3, updated)
@mock.patch('cinder.objects.Service.get_minimum_obj_version',
return_value='1.8')
def test_shared_targets_migrations_non_iscsi(self, mock_version):
"""Ensure we can update the column."""
# Run the migration and verify that all 3 entries were updated
with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
return_value={'connection_protocol': 'RBD'}):
total, updated = (
cinder_manage.shared_targets_online_data_migration(
self.ctxt, 10))
self.assertEqual(3, total)
self.assertEqual(3, updated)
@mock.patch('cinder.objects.Service.get_minimum_obj_version',
return_value='1.8')
def test_shared_targets_migrations_with_limit(self, mock_version):
"""Ensure we update in batches."""
# Run the migration with a limit of 2 and verify we update 2 entries first,
# then the remaining 1
with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
return_value={'connection_protocol': 'iSCSI',
'shared_targets': False}):
total, updated = (
cinder_manage.shared_targets_online_data_migration(
self.ctxt, 2))
self.assertEqual(3, total)
self.assertEqual(2, updated)
total, updated = (
cinder_manage.shared_targets_online_data_migration(
self.ctxt, 2))
self.assertEqual(1, total)
self.assertEqual(1, updated)
|
constanthatz/django_tutorial
|
refs/heads/master
|
mysite/polls/models.py
|
1
|
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __unicode__(self): # __unicode__ on Python 2
return unicode(self.question_text)
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
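# Example: a Question published ten minutes ago reports True here, while one
# dated a month ago (or with a future pub_date) reports False.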
class Choice(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __unicode__(self): # __unicode__ on Python 2
return unicode(self.choice_text)
|
xingrz/android_kernel_pifoundation_bcm2710
|
refs/heads/mkn-mr1
|
tools/perf/scripts/python/netdev-times.py
|
1544
|
# Display the processing of packets and the time spent at each stage.
# It helps us to investigate networking or network device behaviour.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
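# Typical invocation (the exact argument plumbing is an assumption; the
# kernel tree also ships netdev-times-record/-report wrappers for this):
# perf script record netdev-times
# perf script report netdev-times dev=eth0 tx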
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which are freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval (msec) from src (nsec) to dst (nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display the process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
callchain, irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
|
manazhao/tf_recsys
|
refs/heads/r1.0
|
tensorflow/contrib/tensor_forest/client/__init__.py
|
164
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random forest implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.client import random_forest
# pylint: enable=unused-import
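# Hypothetical usage sketch (not part of this package __init__; the exact API
# names are assumptions based on the modules re-exported above):
#   from tensorflow.contrib.tensor_forest.client import random_forest
#   estimator = random_forest.TensorForestEstimator(forest_hparams)
# where forest_hparams would be a tensor_forest.ForestHParams instance.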
|
flibbertigibbet/open-transit-indicators
|
refs/heads/develop
|
python/django/transit_indicators/migrations/0041_auto_20141104_1737.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('transit_indicators', '0040_migrate_version_data'),
]
operations = [
migrations.AddField(
model_name='otiindicatorsconfig',
name='arrive_by_time_s',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='indicator',
unique_together=set([('sample_period', 'type', 'aggregation', 'route_id', 'route_type', 'calculation_job')]),
),
]
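# Hypothetical sketch (not part of this generated migration): the AddField
# above corresponds to a model declaration along the lines of
#
#   class OTIIndicatorsConfig(models.Model):
#       arrive_by_time_s = models.PositiveIntegerField()
#
# preserve_default=False means the default of 0 is only used to backfill
# existing rows while this migration runs; it is not kept on the model.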
|
0-wiz-0/audacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg-amp.lv2/waflib/Node.py
|
196
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re,sys,shutil
from waflib import Utils,Errors
exclude_regs='''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/BitKeeper
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzrignore
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/.arch-ids
**/{arch}
**/_darcs
**/_darcs/**
**/.intlcache
**/.DS_Store'''
def split_path(path):
return path.split('/')
def split_path_cygwin(path):
if path.startswith('//'):
ret=path.split('/')[2:]
ret[0]='/'+ret[0]
return ret
return path.split('/')
re_sp=re.compile('[/\\\\]')
def split_path_win32(path):
if path.startswith('\\\\'):
ret=re.split(re_sp,path)[2:]
ret[0]='\\'+ret[0]
return ret
return re.split(re_sp,path)
if sys.platform=='cygwin':
split_path=split_path_cygwin
elif Utils.is_win32:
split_path=split_path_win32
class Node(object):
__slots__=('name','sig','children','parent','cache_abspath','cache_isdir','cache_sig')
def __init__(self,name,parent):
self.name=name
self.parent=parent
if parent:
if name in parent.children:
raise Errors.WafError('node %s exists in the parent files %r already'%(name,parent))
parent.children[name]=self
def __setstate__(self,data):
self.name=data[0]
self.parent=data[1]
if data[2]is not None:
self.children=data[2]
if data[3]is not None:
self.sig=data[3]
def __getstate__(self):
return(self.name,self.parent,getattr(self,'children',None),getattr(self,'sig',None))
def __str__(self):
return self.name
def __repr__(self):
return self.abspath()
def __hash__(self):
return id(self)
def __eq__(self,node):
return id(self)==id(node)
def __copy__(self):
raise Errors.WafError('nodes are not supposed to be copied')
def read(self,flags='r',encoding='ISO8859-1'):
return Utils.readf(self.abspath(),flags,encoding)
def write(self,data,flags='w',encoding='ISO8859-1'):
Utils.writef(self.abspath(),data,flags,encoding)
def chmod(self,val):
os.chmod(self.abspath(),val)
def delete(self):
try:
if hasattr(self,'children'):
shutil.rmtree(self.abspath())
else:
os.remove(self.abspath())
except OSError:
pass
self.evict()
def evict(self):
del self.parent.children[self.name]
def suffix(self):
k=max(0,self.name.rfind('.'))
return self.name[k:]
def height(self):
d=self
val=-1
while d:
d=d.parent
val+=1
return val
def listdir(self):
lst=Utils.listdir(self.abspath())
lst.sort()
return lst
def mkdir(self):
if getattr(self,'cache_isdir',None):
return
try:
self.parent.mkdir()
except OSError:
pass
if self.name:
try:
os.makedirs(self.abspath())
except OSError:
pass
if not os.path.isdir(self.abspath()):
raise Errors.WafError('Could not create the directory %s'%self.abspath())
try:
self.children
except AttributeError:
self.children={}
self.cache_isdir=True
def find_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
continue
try:
ch=cur.children
except AttributeError:
cur.children={}
else:
try:
cur=cur.children[x]
continue
except KeyError:
pass
cur=self.__class__(x,cur)
try:
os.stat(cur.abspath())
except OSError:
cur.evict()
return None
ret=cur
try:
os.stat(ret.abspath())
except OSError:
ret.evict()
return None
try:
while not getattr(cur.parent,'cache_isdir',None):
cur=cur.parent
cur.cache_isdir=True
except AttributeError:
pass
return ret
def make_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
continue
if getattr(cur,'children',{}):
if x in cur.children:
cur=cur.children[x]
continue
else:
cur.children={}
cur=self.__class__(x,cur)
return cur
def search_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
else:
try:
cur=cur.children[x]
except(AttributeError,KeyError):
return None
return cur
def path_from(self,node):
c1=self
c2=node
c1h=c1.height()
c2h=c2.height()
lst=[]
up=0
while c1h>c2h:
lst.append(c1.name)
c1=c1.parent
c1h-=1
while c2h>c1h:
up+=1
c2=c2.parent
c2h-=1
while id(c1)!=id(c2):
lst.append(c1.name)
up+=1
c1=c1.parent
c2=c2.parent
for i in range(up):
lst.append('..')
lst.reverse()
return os.sep.join(lst)or'.'
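	# Hypothetical example (not part of waflib): for nodes representing /a/b/c
	# and /a/d, node_c.path_from(node_d) climbs both nodes up to the common
	# ancestor /a and returns '../b/c'.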
def abspath(self):
try:
return self.cache_abspath
except AttributeError:
pass
if os.sep=='/':
if not self.parent:
val=os.sep
elif not self.parent.name:
val=os.sep+self.name
else:
val=self.parent.abspath()+os.sep+self.name
else:
if not self.parent:
val=''
elif not self.parent.name:
val=self.name+os.sep
else:
val=self.parent.abspath().rstrip(os.sep)+os.sep+self.name
self.cache_abspath=val
return val
def is_child_of(self,node):
p=self
diff=self.height()-node.height()
while diff>0:
diff-=1
p=p.parent
return id(p)==id(node)
def ant_iter(self,accept=None,maxdepth=25,pats=[],dir=False,src=True,remove=True):
dircont=self.listdir()
dircont.sort()
try:
lst=set(self.children.keys())
except AttributeError:
self.children={}
else:
if remove:
for x in lst-set(dircont):
self.children[x].evict()
for name in dircont:
npats=accept(name,pats)
if npats and npats[0]:
accepted=[]in npats[0]
node=self.make_node([name])
isdir=os.path.isdir(node.abspath())
if accepted:
if isdir:
if dir:
yield node
else:
if src:
yield node
if getattr(node,'cache_isdir',None)or isdir:
node.cache_isdir=True
if maxdepth:
for k in node.ant_iter(accept=accept,maxdepth=maxdepth-1,pats=npats,dir=dir,src=src,remove=remove):
yield k
raise StopIteration
def ant_glob(self,*k,**kw):
src=kw.get('src',True)
dir=kw.get('dir',False)
excl=kw.get('excl',exclude_regs)
incl=k and k[0]or kw.get('incl','**')
reflags=kw.get('ignorecase',0)and re.I
def to_pat(s):
lst=Utils.to_list(s)
ret=[]
for x in lst:
x=x.replace('\\','/').replace('//','/')
if x.endswith('/'):
x+='**'
lst2=x.split('/')
accu=[]
for k in lst2:
if k=='**':
accu.append(k)
else:
k=k.replace('.','[.]').replace('*','.*').replace('?','.').replace('+','\\+')
k='^%s$'%k
try:
accu.append(re.compile(k,flags=reflags))
except Exception ,e:
raise Errors.WafError("Invalid pattern: %s"%k,e)
ret.append(accu)
return ret
def filtre(name,nn):
ret=[]
for lst in nn:
if not lst:
pass
elif lst[0]=='**':
ret.append(lst)
if len(lst)>1:
if lst[1].match(name):
ret.append(lst[2:])
else:
ret.append([])
elif lst[0].match(name):
ret.append(lst[1:])
return ret
def accept(name,pats):
nacc=filtre(name,pats[0])
nrej=filtre(name,pats[1])
if[]in nrej:
nacc=[]
return[nacc,nrej]
ret=[x for x in self.ant_iter(accept=accept,pats=[to_pat(incl),to_pat(excl)],maxdepth=kw.get('maxdepth',25),dir=dir,src=src,remove=kw.get('remove',True))]
if kw.get('flat',False):
return' '.join([x.path_from(self)for x in ret])
return ret
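	# Hypothetical usage sketch (not part of waflib): ant_glob is normally
	# called on a directory node from a wscript, e.g.
	#   sources = bld.path.ant_glob('src/**/*.c', excl=['**/*_test.c'])
	#   flat = bld.path.ant_glob('**/*.h', flat=True)  # space-joined relative paths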
def is_src(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==y:
return False
if id(cur)==x:
return True
cur=cur.parent
return False
def is_bld(self):
cur=self
y=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==y:
return True
cur=cur.parent
return False
def get_src(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
lst=[]
while cur.parent:
if id(cur)==y:
lst.reverse()
return self.ctx.srcnode.make_node(lst)
if id(cur)==x:
return self
lst.append(cur.name)
cur=cur.parent
return self
def get_bld(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
lst=[]
while cur.parent:
if id(cur)==y:
return self
if id(cur)==x:
lst.reverse()
return self.ctx.bldnode.make_node(lst)
lst.append(cur.name)
cur=cur.parent
lst.reverse()
if lst and Utils.is_win32 and len(lst[0])==2 and lst[0].endswith(':'):
lst[0]=lst[0][0]
return self.ctx.bldnode.make_node(['__root__']+lst)
def find_resource(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.get_bld().search_node(lst)
if not node:
self=self.get_src()
node=self.find_node(lst)
if node:
if os.path.isdir(node.abspath()):
return None
return node
def find_or_declare(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.get_bld().search_node(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig=None
node.parent.mkdir()
return node
self=self.get_src()
node=self.find_node(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig=None
node.parent.mkdir()
return node
node=self.get_bld().make_node(lst)
node.parent.mkdir()
return node
def find_dir(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.find_node(lst)
try:
if not os.path.isdir(node.abspath()):
return None
except(OSError,AttributeError):
return None
return node
def change_ext(self,ext,ext_in=None):
name=self.name
if ext_in is None:
k=name.rfind('.')
if k>=0:
name=name[:k]+ext
else:
name=name+ext
else:
name=name[:-len(ext_in)]+ext
return self.parent.find_or_declare([name])
def nice_path(self,env=None):
return self.path_from(self.ctx.launch_node())
def bldpath(self):
return self.path_from(self.ctx.bldnode)
def srcpath(self):
return self.path_from(self.ctx.srcnode)
def relpath(self):
cur=self
x=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==x:
return self.bldpath()
cur=cur.parent
return self.srcpath()
def bld_dir(self):
return self.parent.bldpath()
def bld_base(self):
s=os.path.splitext(self.name)[0]
return self.bld_dir()+os.sep+s
def get_bld_sig(self):
try:
return self.cache_sig
except AttributeError:
pass
if not self.is_bld()or self.ctx.bldnode is self.ctx.srcnode:
self.sig=Utils.h_file(self.abspath())
self.cache_sig=ret=self.sig
return ret
search=search_node
pickle_lock=Utils.threading.Lock()
class Nod3(Node):
pass
|
hfreitas92/portfolio
|
refs/heads/master
|
node_modules/node-sass/node_modules/node-gyp/gyp/setup.py
|
2462
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from setuptools import setup
setup(
name='gyp',
version='0.1',
description='Generate Your Projects',
author='Chromium Authors',
author_email='[email protected]',
url='http://code.google.com/p/gyp',
package_dir = {'': 'pylib'},
packages=['gyp', 'gyp.generator'],
entry_points = {'console_scripts': ['gyp=gyp:script_main'] }
)
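# Hypothetical usage sketch (not part of this setup.py): installing the
# package (for example with `pip install .` from this directory) exposes a
# `gyp` console script that dispatches to gyp.script_main via the
# entry_points declared above.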
|
docker/compose
|
refs/heads/master
|
compose/cli/main.py
|
2
|
import contextlib
import functools
import json
import logging
import pipes
import re
import subprocess
import sys
from distutils.spawn import find_executable
from inspect import getdoc
from operator import attrgetter
import docker.errors
import docker.utils
from . import errors
from . import signals
from .. import __version__
from ..config import ConfigurationError
from ..config import parse_environment
from ..config import parse_labels
from ..config import resolve_build_args
from ..config.environment import Environment
from ..config.serialize import serialize_config
from ..config.types import VolumeSpec
from ..const import IS_LINUX_PLATFORM
from ..const import IS_WINDOWS_PLATFORM
from ..errors import StreamParseError
from ..metrics.decorator import metrics
from ..parallel import ParallelStreamWriter
from ..progress_stream import StreamOutputError
from ..project import get_image_digests
from ..project import MissingDigests
from ..project import NoSuchService
from ..project import OneOffFilter
from ..project import ProjectError
from ..service import BuildAction
from ..service import BuildError
from ..service import ConvergenceStrategy
from ..service import ImageType
from ..service import NeedsBuildError
from ..service import OperationFailedError
from ..utils import filter_attached_for_up
from .colors import AnsiMode
from .command import get_config_from_options
from .command import get_project_dir
from .command import project_from_options
from .docopt_command import DocoptDispatcher
from .docopt_command import get_handler
from .docopt_command import NoSuchCommand
from .errors import UserError
from .formatter import ConsoleWarningFormatter
from .formatter import Formatter
from .log_printer import build_log_presenters
from .log_printer import LogPrinter
from .utils import get_version_info
from .utils import human_readable_file_size
from .utils import yesno
from compose.metrics.client import MetricsCommand
from compose.metrics.client import Status
if not IS_WINDOWS_PLATFORM:
from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation
log = logging.getLogger(__name__)
def main(): # noqa: C901
signals.ignore_sigpipe()
command = None
try:
_, opts, command = DocoptDispatcher.get_command_and_options(
TopLevelCommand,
get_filtered_args(sys.argv[1:]),
{'options_first': True, 'version': get_version_info('compose')})
except Exception:
pass
try:
command_func = dispatch()
command_func()
if not IS_LINUX_PLATFORM and command == 'help':
print("\nDocker Compose is now in the Docker CLI, try `docker compose` help")
except (KeyboardInterrupt, signals.ShutdownException):
exit_with_metrics(command, "Aborting.", status=Status.CANCELED)
except (UserError, NoSuchService, ConfigurationError,
ProjectError, OperationFailedError) as e:
exit_with_metrics(command, e.msg, status=Status.FAILURE)
except BuildError as e:
reason = ""
if e.reason:
reason = " : " + e.reason
exit_with_metrics(command,
"Service '{}' failed to build{}".format(e.service.name, reason),
status=Status.FAILURE)
except StreamOutputError as e:
exit_with_metrics(command, e, status=Status.FAILURE)
except NeedsBuildError as e:
exit_with_metrics(command,
"Service '{}' needs to be built, but --no-build was passed.".format(
e.service.name), status=Status.FAILURE)
except NoSuchCommand as e:
commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))
if not IS_LINUX_PLATFORM:
commands += "\n\nDocker Compose is now in the Docker CLI, try `docker compose`"
exit_with_metrics("", log_msg="No such command: {}\n\n{}".format(
e.command, commands), status=Status.FAILURE)
except (errors.ConnectionError, StreamParseError):
exit_with_metrics(command, status=Status.FAILURE)
except SystemExit as e:
status = Status.SUCCESS
if len(sys.argv) > 1 and '--help' not in sys.argv:
status = Status.FAILURE
if command and len(sys.argv) >= 3 and sys.argv[2] == '--help':
command = '--help ' + command
if not command and len(sys.argv) >= 2 and sys.argv[1] == '--help':
command = '--help'
msg = e.args[0] if len(e.args) else ""
code = 0
if isinstance(e.code, int):
code = e.code
if not IS_LINUX_PLATFORM and not command:
msg += "\n\nDocker Compose is now in the Docker CLI, try `docker compose`"
exit_with_metrics(command, log_msg=msg, status=status,
exit_code=code)
def get_filtered_args(args):
if args[0] in ('-h', '--help'):
return []
if args[0] == '--version':
return ['version']
def exit_with_metrics(command, log_msg=None, status=Status.SUCCESS, exit_code=1):
if log_msg and command != 'exec':
if not exit_code:
log.info(log_msg)
else:
log.error(log_msg)
MetricsCommand(command, status=status).send_metrics()
sys.exit(exit_code)
def dispatch():
console_stream = sys.stderr
console_handler = logging.StreamHandler(console_stream)
setup_logging(console_handler)
dispatcher = DocoptDispatcher(
TopLevelCommand,
{'options_first': True, 'version': get_version_info('compose')})
options, handler, command_options = dispatcher.parse(sys.argv[1:])
ansi_mode = AnsiMode.AUTO
try:
if options.get("--ansi"):
ansi_mode = AnsiMode(options.get("--ansi"))
except ValueError:
raise UserError(
'Invalid value for --ansi: {}. Expected one of {}.'.format(
options.get("--ansi"),
', '.join(m.value for m in AnsiMode)
)
)
if options.get("--no-ansi"):
if options.get("--ansi"):
raise UserError("--no-ansi and --ansi cannot be combined.")
log.warning('--no-ansi option is deprecated and will be removed in future versions. '
'Use `--ansi never` instead.')
ansi_mode = AnsiMode.NEVER
setup_console_handler(console_handler,
options.get('--verbose'),
ansi_mode.use_ansi_codes(console_handler.stream),
options.get("--log-level"))
setup_parallel_logger(ansi_mode)
if ansi_mode is AnsiMode.NEVER:
command_options['--no-color'] = True
return functools.partial(perform_command, options, handler, command_options)
def perform_command(options, handler, command_options):
if options['COMMAND'] in ('help', 'version'):
# Skip looking up the compose file.
handler(command_options)
return
if options['COMMAND'] == 'config':
command = TopLevelCommand(None, options=options)
handler(command, command_options)
return
project = project_from_options('.', options)
command = TopLevelCommand(project, options=options)
with errors.handle_connection_errors(project.client):
handler(command, command_options)
def setup_logging(console_handler):
root_logger = logging.getLogger()
root_logger.addHandler(console_handler)
root_logger.setLevel(logging.DEBUG)
# Disable requests and docker-py logging
logging.getLogger("urllib3").propagate = False
logging.getLogger("requests").propagate = False
logging.getLogger("docker").propagate = False
def setup_parallel_logger(ansi_mode):
ParallelStreamWriter.set_default_ansi_mode(ansi_mode)
def setup_console_handler(handler, verbose, use_console_formatter=True, level=None):
if use_console_formatter:
format_class = ConsoleWarningFormatter
else:
format_class = logging.Formatter
if verbose:
handler.setFormatter(format_class('%(name)s.%(funcName)s: %(message)s'))
loglevel = logging.DEBUG
else:
handler.setFormatter(format_class())
loglevel = logging.INFO
if level is not None:
levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
}
loglevel = levels.get(level.upper())
if loglevel is None:
raise UserError(
'Invalid value for --log-level. Expected one of DEBUG, INFO, WARNING, ERROR, CRITICAL.'
)
handler.setLevel(loglevel)
# stolen from docopt master
def parse_doc_section(name, source):
pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
re.IGNORECASE | re.MULTILINE)
return [s.strip() for s in pattern.findall(source)]
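# Hypothetical example (not part of compose): parse_doc_section("commands:",
# getdoc(TopLevelCommand)) returns the "Commands:" heading together with its
# indented lines as one stripped string; main() joins the result to print the
# command list when a NoSuchCommand error is raised.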
class TopLevelCommand:
"""Define and run multi-container applications with Docker.
Usage:
docker-compose [-f <arg>...] [--profile <name>...] [options] [--] [COMMAND] [ARGS...]
docker-compose -h|--help
Options:
-f, --file FILE Specify an alternate compose file
(default: docker-compose.yml)
-p, --project-name NAME Specify an alternate project name
(default: directory name)
--profile NAME Specify a profile to enable
-c, --context NAME Specify a context name
--verbose Show more output
--log-level LEVEL Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
--ansi (never|always|auto) Control when to print ANSI control characters
--no-ansi Do not print ANSI control characters (DEPRECATED)
-v, --version Print version and exit
-H, --host HOST Daemon socket to connect to
--tls Use TLS; implied by --tlsverify
--tlscacert CA_PATH Trust certs signed only by this CA
--tlscert CLIENT_CERT_PATH Path to TLS certificate file
--tlskey TLS_KEY_PATH Path to TLS key file
--tlsverify Use TLS and verify the remote
--skip-hostname-check Don't check the daemon's hostname against the
name specified in the client certificate
--project-directory PATH Specify an alternate working directory
(default: the path of the Compose file)
--compatibility If set, Compose will attempt to convert keys
in v3 files to their non-Swarm equivalent (DEPRECATED)
--env-file PATH Specify an alternate environment file
Commands:
build Build or rebuild services
config Validate and view the Compose file
create Create services
down Stop and remove resources
events Receive real time events from containers
exec Execute a command in a running container
help Get help on a command
images List images
kill Kill containers
logs View output from containers
pause Pause services
port Print the public port for a port binding
ps List containers
pull Pull service images
push Push service images
restart Restart services
rm Remove stopped containers
run Run a one-off command
scale Set number of containers for a service
start Start services
stop Stop services
top Display the running processes
unpause Unpause services
up Create and start containers
version Show version information and quit
"""
def __init__(self, project, options=None):
self.project = project
self.toplevel_options = options or {}
@property
def project_dir(self):
return get_project_dir(self.toplevel_options)
@property
def toplevel_environment(self):
environment_file = self.toplevel_options.get('--env-file')
return Environment.from_env_file(self.project_dir, environment_file)
@metrics()
def build(self, options):
"""
Build or rebuild services.
Services are built once and then tagged as `project_service`,
e.g. `composetest_db`. If you change a service's `Dockerfile` or the
contents of its build directory, you can run `docker-compose build` to rebuild it.
Usage: build [options] [--build-arg key=val...] [--] [SERVICE...]
Options:
--build-arg key=val Set build-time variables for services.
--compress Compress the build context using gzip.
--force-rm Always remove intermediate containers.
-m, --memory MEM Set memory limit for the build container.
--no-cache Do not use cache when building the image.
--no-rm Do not remove intermediate containers after a successful build.
--parallel Build images in parallel.
--progress string Set type of progress output (auto, plain, tty).
--pull Always attempt to pull a newer version of the image.
-q, --quiet Don't print anything to STDOUT
"""
service_names = options['SERVICE']
build_args = options.get('--build-arg', None)
if build_args:
if not service_names and docker.utils.version_lt(self.project.client.api_version, '1.25'):
raise UserError(
'--build-arg is only supported when services are specified for API version < 1.25.'
' Please use a Compose file version > 2.2 or specify which services to build.'
)
build_args = resolve_build_args(build_args, self.toplevel_environment)
native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD', True)
self.project.build(
service_names=options['SERVICE'],
no_cache=bool(options.get('--no-cache', False)),
pull=bool(options.get('--pull', False)),
force_rm=bool(options.get('--force-rm', False)),
memory=options.get('--memory'),
rm=not bool(options.get('--no-rm', False)),
build_args=build_args,
gzip=options.get('--compress', False),
parallel_build=options.get('--parallel', False),
silent=options.get('--quiet', False),
cli=native_builder,
progress=options.get('--progress'),
)
@metrics()
def config(self, options):
"""
Validate and view the Compose file.
Usage: config [options]
Options:
--resolve-image-digests Pin image tags to digests.
--no-interpolate Don't interpolate environment variables.
-q, --quiet Only validate the configuration, don't print
anything.
--profiles Print the profile names, one per line.
--services Print the service names, one per line.
--volumes Print the volume names, one per line.
--hash="*" Print the service config hash, one per line.
Set "service1,service2" for a list of specified services
or use the wildcard symbol to display all services.
"""
additional_options = {'--no-interpolate': options.get('--no-interpolate')}
compose_config = get_config_from_options('.', self.toplevel_options, additional_options)
image_digests = None
if options['--resolve-image-digests']:
self.project = project_from_options('.', self.toplevel_options, additional_options)
with errors.handle_connection_errors(self.project.client):
image_digests = image_digests_for_project(self.project)
if options['--quiet']:
return
if options['--profiles']:
profiles = set()
for service in compose_config.services:
if 'profiles' in service:
for profile in service['profiles']:
profiles.add(profile)
print('\n'.join(sorted(profiles)))
return
if options['--services']:
print('\n'.join(service['name'] for service in compose_config.services))
return
if options['--volumes']:
print('\n'.join(volume for volume in compose_config.volumes))
return
if options['--hash'] is not None:
h = options['--hash']
self.project = project_from_options('.', self.toplevel_options, additional_options)
services = [svc for svc in options['--hash'].split(',')] if h != '*' else None
with errors.handle_connection_errors(self.project.client):
for service in self.project.get_services(services):
print('{} {}'.format(service.name, service.config_hash))
return
print(serialize_config(compose_config, image_digests, not options['--no-interpolate']))
@metrics()
def create(self, options):
"""
Creates containers for a service.
This command is deprecated. Use the `up` command with `--no-start` instead.
Usage: create [options] [SERVICE...]
Options:
--force-recreate Recreate containers even if their configuration and
image haven't changed. Incompatible with --no-recreate.
--no-recreate If containers already exist, don't recreate them.
Incompatible with --force-recreate.
--no-build Don't build an image, even if it's missing.
--build Build images before creating containers.
"""
service_names = options['SERVICE']
log.warning(
'The create command is deprecated. '
'Use the up command with the --no-start flag instead.'
)
self.project.create(
service_names=service_names,
strategy=convergence_strategy_from_opts(options),
do_build=build_action_from_opts(options),
)
@metrics()
def down(self, options):
"""
Stops containers and removes containers, networks, volumes, and images
created by `up`.
By default, the only things removed are:
- Containers for services defined in the Compose file
- Networks defined in the `networks` section of the Compose file
- The default network, if one is used
Networks and volumes defined as `external` are never removed.
Usage: down [options]
Options:
--rmi type Remove images. Type must be one of:
'all': Remove all images used by any service.
'local': Remove only images that don't have a
custom tag set by the `image` field.
-v, --volumes Remove named volumes declared in the `volumes`
section of the Compose file and anonymous volumes
attached to containers.
--remove-orphans Remove containers for services not defined in the
Compose file
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
if ignore_orphans and options['--remove-orphans']:
raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
image_type = image_type_from_opt('--rmi', options['--rmi'])
timeout = timeout_from_opts(options)
self.project.down(
image_type,
options['--volumes'],
options['--remove-orphans'],
timeout=timeout,
ignore_orphans=ignore_orphans)
def events(self, options):
"""
Receive real time events from containers.
Usage: events [options] [--] [SERVICE...]
Options:
--json Output events as a stream of json objects
"""
def format_event(event):
attributes = ["%s=%s" % item for item in event['attributes'].items()]
return ("{time} {type} {action} {id} ({attrs})").format(
attrs=", ".join(sorted(attributes)),
**event)
def json_format_event(event):
event['time'] = event['time'].isoformat()
event.pop('container')
return json.dumps(event)
for event in self.project.events():
formatter = json_format_event if options['--json'] else format_event
print(formatter(event))
sys.stdout.flush()
@metrics("exec")
def exec_command(self, options):
"""
Execute a command in a running container
Usage: exec [options] [-e KEY=VAL...] [--] SERVICE COMMAND [ARGS...]
Options:
-d, --detach Detached mode: Run command in the background.
--privileged Give extended privileges to the process.
-u, --user USER Run the command as this user.
-T Disable pseudo-tty allocation. By default `docker-compose exec`
allocates a TTY.
--index=index index of the container if there are multiple
instances of a service [default: 1]
-e, --env KEY=VAL Set environment variables (can be used multiple times,
not supported in API < 1.25)
-w, --workdir DIR Path to workdir directory for this command.
"""
use_cli = not self.toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
index = int(options.get('--index'))
service = self.project.get_service(options['SERVICE'])
detach = options.get('--detach')
if options['--env'] and docker.utils.version_lt(self.project.client.api_version, '1.25'):
raise UserError("Setting environment for exec is not supported in API < 1.25 (%s)"
% self.project.client.api_version)
if options['--workdir'] and docker.utils.version_lt(self.project.client.api_version, '1.35'):
raise UserError("Setting workdir for exec is not supported in API < 1.35 (%s)"
% self.project.client.api_version)
try:
container = service.get_container(number=index)
except ValueError as e:
raise UserError(str(e))
command = [options['COMMAND']] + options['ARGS']
tty = not options["-T"]
if IS_WINDOWS_PLATFORM or use_cli and not detach:
sys.exit(call_docker(
build_exec_command(options, container.id, command),
self.toplevel_options, self.toplevel_environment)
)
create_exec_options = {
"privileged": options["--privileged"],
"user": options["--user"],
"tty": tty,
"stdin": True,
"workdir": options["--workdir"],
}
if docker.utils.version_gte(self.project.client.api_version, '1.25'):
create_exec_options["environment"] = options["--env"]
exec_id = container.create_exec(command, **create_exec_options)
if detach:
container.start_exec(exec_id, tty=tty, stream=True)
return
signals.set_signal_handler_to_shutdown()
try:
operation = ExecOperation(
self.project.client,
exec_id,
interactive=tty,
)
pty = PseudoTerminal(self.project.client, operation)
pty.start()
except signals.ShutdownException:
log.info("received shutdown exception: closing")
exit_code = self.project.client.exec_inspect(exec_id).get("ExitCode")
sys.exit(exit_code)
@classmethod
@metrics()
def help(cls, options):
"""
Get help on a command.
Usage: help [COMMAND]
"""
if options['COMMAND']:
subject = get_handler(cls, options['COMMAND'])
else:
subject = cls
print(getdoc(subject))
@metrics()
def images(self, options):
"""
List images used by the created containers.
Usage: images [options] [--] [SERVICE...]
Options:
-q, --quiet Only display IDs
"""
containers = sorted(
self.project.containers(service_names=options['SERVICE'], stopped=True) +
self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
key=attrgetter('name'))
if options['--quiet']:
for image in {c.image for c in containers}:
print(image.split(':')[1])
return
def add_default_tag(img_name):
if ':' not in img_name.split('/')[-1]:
return '{}:latest'.format(img_name)
return img_name
headers = [
'Container',
'Repository',
'Tag',
'Image Id',
'Size'
]
rows = []
for container in containers:
image_config = container.image_config
service = self.project.get_service(container.service)
index = 0
img_name = add_default_tag(service.image_name)
if img_name in image_config['RepoTags']:
index = image_config['RepoTags'].index(img_name)
repo_tags = (
image_config['RepoTags'][index].rsplit(':', 1) if image_config['RepoTags']
else ('<none>', '<none>')
)
image_id = image_config['Id'].split(':')[1][:12]
size = human_readable_file_size(image_config['Size'])
rows.append([
container.name,
repo_tags[0],
repo_tags[1],
image_id,
size
])
print(Formatter.table(headers, rows))
@metrics()
def kill(self, options):
"""
Force stop service containers.
Usage: kill [options] [--] [SERVICE...]
Options:
-s SIGNAL SIGNAL to send to the container.
Default signal is SIGKILL.
"""
signal = options.get('-s', 'SIGKILL')
self.project.kill(service_names=options['SERVICE'], signal=signal)
@metrics()
def logs(self, options):
"""
View output from containers.
Usage: logs [options] [--] [SERVICE...]
Options:
--no-color Produce monochrome output.
-f, --follow Follow log output.
-t, --timestamps Show timestamps.
--tail="all" Number of lines to show from the end of the logs
for each container.
--no-log-prefix Don't print prefix in logs.
"""
containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
tail = options['--tail']
if tail is not None:
if tail.isdigit():
tail = int(tail)
elif tail != 'all':
raise UserError("tail flag must be all or a number")
log_args = {
'follow': options['--follow'],
'tail': tail,
'timestamps': options['--timestamps']
}
print("Attaching to", list_containers(containers))
log_printer_from_project(
self.project,
containers,
options['--no-color'],
log_args,
event_stream=self.project.events(service_names=options['SERVICE']),
keep_prefix=not options['--no-log-prefix']).run()
@metrics()
def pause(self, options):
"""
Pause services.
Usage: pause [SERVICE...]
"""
containers = self.project.pause(service_names=options['SERVICE'])
exit_if(not containers, 'No containers to pause', 1)
@metrics()
def port(self, options):
"""
Print the public port for a port binding.
Usage: port [options] [--] SERVICE PRIVATE_PORT
Options:
--protocol=proto tcp or udp [default: tcp]
--index=index index of the container if there are multiple
instances of a service [default: 1]
"""
index = int(options.get('--index'))
service = self.project.get_service(options['SERVICE'])
try:
container = service.get_container(number=index)
except ValueError as e:
raise UserError(str(e))
print(container.get_local_port(
options['PRIVATE_PORT'],
protocol=options.get('--protocol') or 'tcp') or '')
@metrics()
def ps(self, options):
"""
List containers.
Usage: ps [options] [--] [SERVICE...]
Options:
-q, --quiet Only display IDs
--services Display services
--filter KEY=VAL Filter services by a property
-a, --all Show all stopped containers (including those created by the run command)
"""
if options['--quiet'] and options['--services']:
raise UserError('--quiet and --services cannot be combined')
if options['--services']:
filt = build_filter(options.get('--filter'))
services = self.project.services
if filt:
services = filter_services(filt, services, self.project)
print('\n'.join(service.name for service in services))
return
if options['--all']:
containers = sorted(self.project.containers(service_names=options['SERVICE'],
one_off=OneOffFilter.include, stopped=True),
key=attrgetter('name'))
else:
containers = sorted(
self.project.containers(service_names=options['SERVICE'], stopped=True) +
self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
key=attrgetter('name'))
if options['--quiet']:
for container in containers:
print(container.id)
else:
headers = [
'Name',
'Command',
'State',
'Ports',
]
rows = []
for container in containers:
command = container.human_readable_command
if len(command) > 30:
command = '%s ...' % command[:26]
rows.append([
container.name,
command,
container.human_readable_state,
container.human_readable_ports,
])
print(Formatter.table(headers, rows))
@metrics()
def pull(self, options):
"""
Pulls images for services defined in a Compose file, but does not start the containers.
Usage: pull [options] [--] [SERVICE...]
Options:
--ignore-pull-failures Pull what it can and ignores images with pull failures.
--parallel Deprecated, pull multiple images in parallel (enabled by default).
--no-parallel Disable parallel pulling.
-q, --quiet Pull without printing progress information
--include-deps Also pull services declared as dependencies
"""
if options.get('--parallel'):
log.warning('--parallel option is deprecated and will be removed in future versions.')
self.project.pull(
service_names=options['SERVICE'],
ignore_pull_failures=options.get('--ignore-pull-failures'),
parallel_pull=not options.get('--no-parallel'),
silent=options.get('--quiet'),
include_deps=options.get('--include-deps'),
)
@metrics()
def push(self, options):
"""
Pushes images for services.
Usage: push [options] [--] [SERVICE...]
Options:
--ignore-push-failures Push what it can and ignores images with push failures.
"""
self.project.push(
service_names=options['SERVICE'],
ignore_push_failures=options.get('--ignore-push-failures')
)
@metrics()
def rm(self, options):
"""
Removes stopped service containers.
By default, anonymous volumes attached to containers will not be removed. You
can override this with `-v`. To list all volumes, use `docker volume ls`.
Any data which is not in a volume will be lost.
Usage: rm [options] [--] [SERVICE...]
Options:
-f, --force Don't ask to confirm removal
-s, --stop Stop the containers, if required, before removing
-v Remove any anonymous volumes attached to containers
-a, --all Deprecated - no effect.
"""
if options.get('--all'):
log.warning(
'--all flag is obsolete. This is now the default behavior '
'of `docker-compose rm`'
)
one_off = OneOffFilter.include
if options.get('--stop'):
self.project.stop(service_names=options['SERVICE'], one_off=one_off)
all_containers = self.project.containers(
service_names=options['SERVICE'], stopped=True, one_off=one_off
)
stopped_containers = [c for c in all_containers if not c.is_running]
if len(stopped_containers) > 0:
print("Going to remove", list_containers(stopped_containers))
if options.get('--force') \
or yesno("Are you sure? [yN] ", default=False):
self.project.remove_stopped(
service_names=options['SERVICE'],
v=options.get('-v', False),
one_off=one_off
)
else:
print("No stopped containers")
@metrics()
def run(self, options):
"""
Run a one-off command on a service.
For example:
$ docker-compose run web python manage.py shell
By default, linked services will be started, unless they are already
running. If you do not want to start linked services, use
`docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
Usage:
run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] [-l KEY=VALUE...] [--]
SERVICE [COMMAND] [ARGS...]
Options:
-d, --detach Detached mode: Run container in the background, print
new container name.
--name NAME Assign a name to the container
--entrypoint CMD Override the entrypoint of the image.
-e KEY=VAL Set an environment variable (can be used multiple times)
-l, --label KEY=VAL Add or override a label (can be used multiple times)
-u, --user="" Run as specified username or uid
--no-deps Don't start linked services.
--rm Remove container after run. Ignored in detached mode.
-p, --publish=[] Publish a container's port(s) to the host
--service-ports Run command with the service's ports enabled and mapped
to the host.
--use-aliases Use the service's network aliases in the network(s) the
container connects to.
-v, --volume=[] Bind mount a volume (default [])
-T Disable pseudo-tty allocation. By default `docker-compose run`
allocates a TTY.
-w, --workdir="" Working directory inside the container
"""
service = self.project.get_service(options['SERVICE'])
detach = options.get('--detach')
if options['--publish'] and options['--service-ports']:
raise UserError(
'Service port mapping and manual port mapping '
'can not be used together'
)
if options['COMMAND'] is not None:
command = [options['COMMAND']] + options['ARGS']
elif options['--entrypoint'] is not None:
command = []
else:
command = service.options.get('command')
options['stdin_open'] = service.options.get('stdin_open', True)
container_options = build_one_off_container_options(options, detach, command)
run_one_off_container(
container_options, self.project, service, options,
self.toplevel_options, self.toplevel_environment
)
@metrics()
def scale(self, options):
"""
Set number of containers to run for a service.
Numbers are specified in the form `service=num` as arguments.
For example:
$ docker-compose scale web=2 worker=3
This command is deprecated. Use the up command with the `--scale` flag
instead.
Usage: scale [options] [SERVICE=NUM...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = timeout_from_opts(options)
log.warning(
'The scale command is deprecated. '
'Use the up command with the --scale flag instead.'
)
for service_name, num in parse_scale_args(options['SERVICE=NUM']).items():
self.project.get_service(service_name).scale(num, timeout=timeout)
@metrics()
def start(self, options):
"""
Start existing containers.
Usage: start [SERVICE...]
"""
containers = self.project.start(service_names=options['SERVICE'])
exit_if(not containers, 'No containers to start', 1)
@metrics()
def stop(self, options):
"""
Stop running containers without removing them.
They can be started again with `docker-compose start`.
Usage: stop [options] [--] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = timeout_from_opts(options)
self.project.stop(service_names=options['SERVICE'], timeout=timeout)
@metrics()
def restart(self, options):
"""
Restart running containers.
Usage: restart [options] [--] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = timeout_from_opts(options)
containers = self.project.restart(service_names=options['SERVICE'], timeout=timeout)
exit_if(not containers, 'No containers to restart', 1)
@metrics()
def top(self, options):
"""
Display the running processes
Usage: top [SERVICE...]
"""
containers = sorted(
self.project.containers(service_names=options['SERVICE'], stopped=False) +
self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
key=attrgetter('name')
)
for idx, container in enumerate(containers):
if idx > 0:
print()
top_data = self.project.client.top(container.name)
headers = top_data.get("Titles")
rows = []
for process in top_data.get("Processes", []):
rows.append(process)
print(container.name)
print(Formatter.table(headers, rows))
@metrics()
def unpause(self, options):
"""
Unpause services.
Usage: unpause [SERVICE...]
"""
containers = self.project.unpause(service_names=options['SERVICE'])
exit_if(not containers, 'No containers to unpause', 1)
@metrics()
def up(self, options):
"""
Builds, (re)creates, starts, and attaches to containers for a service.
Unless they are already running, this command also starts any linked services.
The `docker-compose up` command aggregates the output of each container. When
the command exits, all containers are stopped. Running `docker-compose up -d`
starts the containers in the background and leaves them running.
If there are existing containers for a service, and the service's configuration
or image was changed after the container's creation, `docker-compose up` picks
up the changes by stopping and recreating the containers (preserving mounted
volumes). To prevent Compose from picking up changes, use the `--no-recreate`
flag.
If you want to force Compose to stop and recreate all containers, use the
`--force-recreate` flag.
Usage: up [options] [--scale SERVICE=NUM...] [--] [SERVICE...]
Options:
-d, --detach Detached mode: Run containers in the background,
print new container names. Incompatible with
--abort-on-container-exit.
--no-color Produce monochrome output.
--quiet-pull Pull without printing progress information
--no-deps Don't start linked services.
--force-recreate Recreate containers even if their configuration
and image haven't changed.
--always-recreate-deps Recreate dependent containers.
Incompatible with --no-recreate.
--no-recreate If containers already exist, don't recreate
them. Incompatible with --force-recreate and -V.
--no-build Don't build an image, even if it's missing.
--no-start Don't start the services after creating them.
--build Build images before starting containers.
--abort-on-container-exit Stops all containers if any container was
stopped. Incompatible with -d.
--attach-dependencies Attach to dependent containers.
-t, --timeout TIMEOUT Use this timeout in seconds for container
shutdown when attached or when containers are
already running. (default: 10)
-V, --renew-anon-volumes Recreate anonymous volumes instead of retrieving
data from the previous containers.
--remove-orphans Remove containers for services not defined
in the Compose file.
--exit-code-from SERVICE Return the exit code of the selected service
container. Implies --abort-on-container-exit.
--scale SERVICE=NUM Scale SERVICE to NUM instances. Overrides the
`scale` setting in the Compose file if present.
--no-log-prefix Don't print prefix in logs.
"""
start_deps = not options['--no-deps']
always_recreate_deps = options['--always-recreate-deps']
exit_value_from = exitval_from_opts(options, self.project)
cascade_stop = options['--abort-on-container-exit']
service_names = options['SERVICE']
timeout = timeout_from_opts(options)
remove_orphans = options['--remove-orphans']
detached = options.get('--detach')
no_start = options.get('--no-start')
attach_dependencies = options.get('--attach-dependencies')
keep_prefix = not options.get('--no-log-prefix')
if detached and (cascade_stop or exit_value_from or attach_dependencies):
raise UserError(
"-d cannot be combined with --abort-on-container-exit or --attach-dependencies.")
ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
if ignore_orphans and remove_orphans:
raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
opts = ['--detach', '--abort-on-container-exit', '--exit-code-from', '--attach-dependencies']
for excluded in [x for x in opts if options.get(x) and no_start]:
raise UserError('--no-start and {} cannot be combined.'.format(excluded))
native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD', True)
with up_shutdown_context(self.project, service_names, timeout, detached):
warn_for_swarm_mode(self.project.client)
def up(rebuild):
return self.project.up(
service_names=service_names,
start_deps=start_deps,
strategy=convergence_strategy_from_opts(options),
do_build=build_action_from_opts(options),
timeout=timeout,
detached=detached,
remove_orphans=remove_orphans,
ignore_orphans=ignore_orphans,
scale_override=parse_scale_args(options['--scale']),
start=not no_start,
always_recreate_deps=always_recreate_deps,
reset_container_image=rebuild,
renew_anonymous_volumes=options.get('--renew-anon-volumes'),
silent=options.get('--quiet-pull'),
cli=native_builder,
attach_dependencies=attach_dependencies,
)
try:
to_attach = up(False)
except docker.errors.ImageNotFound as e:
log.error(
"The image for the service you're trying to recreate has been removed. "
"If you continue, volume data could be lost. Consider backing up your data "
"before continuing.\n"
)
res = yesno("Continue with the new image? [yN]", False)
if res is None or not res:
raise e
to_attach = up(True)
if detached or no_start:
return
attached_containers = filter_attached_containers(
to_attach,
service_names,
attach_dependencies)
log_printer = log_printer_from_project(
self.project,
attached_containers,
options['--no-color'],
{'follow': True},
cascade_stop,
event_stream=self.project.events(service_names=service_names),
keep_prefix=keep_prefix)
print("Attaching to", list_containers(log_printer.containers))
cascade_starter = log_printer.run()
if cascade_stop:
print("Aborting on container exit...")
all_containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
exit_code = compute_exit_code(
exit_value_from, attached_containers, cascade_starter, all_containers
)
self.project.stop(service_names=service_names, timeout=timeout)
if exit_value_from:
exit_code = compute_service_exit_code(exit_value_from, attached_containers)
sys.exit(exit_code)
@classmethod
@metrics()
def version(cls, options):
"""
Show version information and quit.
Usage: version [--short]
Options:
--short Shows only Compose's version number.
"""
if options['--short']:
print(__version__)
else:
print(get_version_info('full'))
def compute_service_exit_code(exit_value_from, attached_containers):
candidates = list(filter(
lambda c: c.service == exit_value_from,
attached_containers))
if not candidates:
log.error(
'No containers matching the spec "{}" '
'were run.'.format(exit_value_from)
)
return 2
    if len(candidates) > 1:
        # filter() returns a lazy iterator on Python 3, so materialize it before
        # indexing; fall back to 0 when every candidate exited cleanly.
        exit_values = list(filter(
            lambda e: e != 0,
            [c.inspect()['State']['ExitCode'] for c in candidates]
        ))
        return exit_values[0] if exit_values else 0
return candidates[0].inspect()['State']['ExitCode']
def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers):
exit_code = 0
for e in all_containers:
if (not e.is_running and cascade_starter == e.name):
if not e.exit_code == 0:
exit_code = e.exit_code
break
return exit_code
def convergence_strategy_from_opts(options):
no_recreate = options['--no-recreate']
force_recreate = options['--force-recreate']
renew_anonymous_volumes = options.get('--renew-anon-volumes')
if force_recreate and no_recreate:
raise UserError("--force-recreate and --no-recreate cannot be combined.")
if no_recreate and renew_anonymous_volumes:
raise UserError('--no-recreate and --renew-anon-volumes cannot be combined.')
if force_recreate or renew_anonymous_volumes:
return ConvergenceStrategy.always
if no_recreate:
return ConvergenceStrategy.never
return ConvergenceStrategy.changed
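# Summary note (added for clarity, not in the original module): --force-recreate
# and -V/--renew-anon-volumes map to ConvergenceStrategy.always, --no-recreate
# maps to ConvergenceStrategy.never, and with neither flag the default is
# ConvergenceStrategy.changed; the UserError checks above reject contradictory
# flag combinations.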
def timeout_from_opts(options):
timeout = options.get('--timeout')
return None if timeout is None else int(timeout)
def image_digests_for_project(project):
try:
return get_image_digests(project)
except MissingDigests as e:
def list_images(images):
return "\n".join(" {}".format(name) for name in sorted(images))
paras = ["Some images are missing digests."]
if e.needs_push:
command_hint = (
"Use `docker push {}` to push them. "
.format(" ".join(sorted(e.needs_push)))
)
paras += [
"The following images can be pushed:",
list_images(e.needs_push),
command_hint,
]
if e.needs_pull:
command_hint = (
"Use `docker pull {}` to pull them. "
.format(" ".join(sorted(e.needs_pull)))
)
paras += [
"The following images need to be pulled:",
list_images(e.needs_pull),
command_hint,
]
raise UserError("\n\n".join(paras))
def exitval_from_opts(options, project):
exit_value_from = options.get('--exit-code-from')
if exit_value_from:
if not options.get('--abort-on-container-exit'):
log.warning('using --exit-code-from implies --abort-on-container-exit')
options['--abort-on-container-exit'] = True
if exit_value_from not in [s.name for s in project.get_services()]:
log.error('No service named "%s" was found in your compose file.',
exit_value_from)
sys.exit(2)
return exit_value_from
def image_type_from_opt(flag, value):
if not value:
return ImageType.none
try:
return ImageType[value]
except KeyError:
raise UserError("%s flag must be one of: all, local" % flag)
def build_action_from_opts(options):
if options['--build'] and options['--no-build']:
raise UserError("--build and --no-build can not be combined.")
if options['--build']:
return BuildAction.force
if options['--no-build']:
return BuildAction.skip
return BuildAction.none
def build_one_off_container_options(options, detach, command):
container_options = {
'command': command,
'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
'stdin_open': options.get('stdin_open'),
'detach': detach,
}
if options['-e']:
container_options['environment'] = Environment.from_command_line(
parse_environment(options['-e'])
)
if options['--label']:
container_options['labels'] = parse_labels(options['--label'])
if options.get('--entrypoint') is not None:
container_options['entrypoint'] = (
[""] if options['--entrypoint'] == '' else options['--entrypoint']
)
# Ensure that run command remains one-off (issue #6302)
container_options['restart'] = None
if options['--user']:
container_options['user'] = options.get('--user')
if not options['--service-ports']:
container_options['ports'] = []
if options['--publish']:
container_options['ports'] = options.get('--publish')
if options['--name']:
container_options['name'] = options['--name']
if options['--workdir']:
container_options['working_dir'] = options['--workdir']
if options['--volume']:
volumes = [VolumeSpec.parse(i) for i in options['--volume']]
container_options['volumes'] = volumes
return container_options
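# Illustrative sketch (added for clarity, not in the original module): for a
# hypothetical `docker-compose run -e FOO=bar --user app web echo hi`, the
# resulting container_options would look roughly like
#   {'command': ['echo', 'hi'], 'detach': False, 'restart': None,
#    'environment': <Environment with FOO=bar>, 'user': 'app', 'ports': [],
#    'tty': <True when stdin is a TTY and -T was not given>, ...}
# with 'ports' emptied because --service-ports was not passed and 'restart'
# cleared so the one-off container is never auto-restarted (issue #6302).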
def run_one_off_container(container_options, project, service, options, toplevel_options,
toplevel_environment):
native_builder = toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')
detach = options.get('--detach')
use_network_aliases = options.get('--use-aliases')
service.scale_num = 1
containers = project.up(
service_names=[service.name],
start_deps=not options['--no-deps'],
strategy=ConvergenceStrategy.never,
detached=True,
rescale=False,
cli=native_builder,
one_off=True,
override_options=container_options,
)
try:
container = next(c for c in containers if c.service == service.name)
except StopIteration:
raise OperationFailedError('Could not bring up the requested service')
if detach:
service.start_container(container, use_network_aliases)
print(container.name)
return
def remove_container():
if options['--rm']:
project.client.remove_container(container.id, force=True, v=True)
use_cli = not toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
signals.set_signal_handler_to_shutdown()
signals.set_signal_handler_to_hang_up()
try:
try:
if IS_WINDOWS_PLATFORM or use_cli:
service.connect_container_to_networks(container, use_network_aliases)
exit_code = call_docker(
get_docker_start_call(container_options, container.id),
toplevel_options, toplevel_environment
)
else:
operation = RunOperation(
project.client,
container.id,
interactive=not options['-T'],
logs=False,
)
pty = PseudoTerminal(project.client, operation)
sockets = pty.sockets()
service.start_container(container, use_network_aliases)
pty.start(sockets)
exit_code = container.wait()
except (signals.ShutdownException):
project.client.stop(container.id)
exit_code = 1
except (signals.ShutdownException, signals.HangUpException):
project.client.kill(container.id)
remove_container()
sys.exit(2)
remove_container()
sys.exit(exit_code)
def get_docker_start_call(container_options, container_id):
docker_call = ["start"]
if not container_options.get('detach'):
docker_call.append("--attach")
if container_options.get('stdin_open'):
docker_call.append("--interactive")
docker_call.append(container_id)
return docker_call
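# Illustrative example (added for clarity, not in the original module):
# get_docker_start_call({'detach': False, 'stdin_open': True}, 'abc123')
# returns ['start', '--attach', '--interactive', 'abc123']; call_docker()
# then prepends the docker executable plus any TLS/host/context options.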
def log_printer_from_project(
project,
containers,
monochrome,
log_args,
cascade_stop=False,
event_stream=None,
keep_prefix=True,
):
return LogPrinter(
[c for c in containers if c.log_driver not in (None, 'none')],
build_log_presenters(project.service_names, monochrome, keep_prefix),
event_stream or project.events(),
cascade_stop=cascade_stop,
log_args=log_args)
def filter_attached_containers(containers, service_names, attach_dependencies=False):
return filter_attached_for_up(
containers,
service_names,
attach_dependencies,
lambda container: container.service)
@contextlib.contextmanager
def up_shutdown_context(project, service_names, timeout, detached):
if detached:
yield
return
signals.set_signal_handler_to_shutdown()
try:
try:
yield
except signals.ShutdownException:
print("Gracefully stopping... (press Ctrl+C again to force)")
project.stop(service_names=service_names, timeout=timeout)
except signals.ShutdownException:
project.kill(service_names=service_names)
sys.exit(2)
def list_containers(containers):
return ", ".join(c.name for c in containers)
def exit_if(condition, message, exit_code):
if condition:
log.error(message)
raise SystemExit(exit_code)
def call_docker(args, dockeropts, environment):
executable_path = find_executable('docker')
if not executable_path:
raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
tls = dockeropts.get('--tls', False)
ca_cert = dockeropts.get('--tlscacert')
cert = dockeropts.get('--tlscert')
key = dockeropts.get('--tlskey')
verify = dockeropts.get('--tlsverify')
host = dockeropts.get('--host')
context = dockeropts.get('--context')
tls_options = []
if tls:
tls_options.append('--tls')
if ca_cert:
tls_options.extend(['--tlscacert', ca_cert])
if cert:
tls_options.extend(['--tlscert', cert])
if key:
tls_options.extend(['--tlskey', key])
if verify:
tls_options.append('--tlsverify')
if host:
tls_options.extend(
['--host', re.sub(r'^https?://', 'tcp://', host.lstrip('='))]
)
if context:
tls_options.extend(
['--context', context]
)
args = [executable_path] + tls_options + args
log.debug(" ".join(map(pipes.quote, args)))
filtered_env = {k: v for k, v in environment.items() if v is not None}
return subprocess.call(args, env=filtered_env)
def parse_scale_args(options):
res = {}
for s in options:
if '=' not in s:
raise UserError('Arguments to scale should be in the form service=num')
service_name, num = s.split('=', 1)
try:
num = int(num)
except ValueError:
raise UserError(
'Number of containers for service "%s" is not a number' % service_name
)
res[service_name] = num
return res
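# Illustrative example (added for clarity, not in the original module):
# parse_scale_args(['web=2', 'worker=3']) returns {'web': 2, 'worker': 3};
# an argument without '=' or with a non-numeric count raises UserError.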
def build_exec_command(options, container_id, command):
args = ["exec"]
if options["--detach"]:
args += ["--detach"]
else:
args += ["--interactive"]
if not options["-T"]:
args += ["--tty"]
if options["--privileged"]:
args += ["--privileged"]
if options["--user"]:
args += ["--user", options["--user"]]
if options["--env"]:
for env_variable in options["--env"]:
args += ["--env", env_variable]
if options["--workdir"]:
args += ["--workdir", options["--workdir"]]
args += [container_id]
args += command
return args
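# Illustrative example (added for clarity, not in the original module): with
# the default flags (no --detach, no -T), build_exec_command(options, 'abc123',
# ['sh']) yields ['exec', '--interactive', '--tty', 'abc123', 'sh'], matching
# the arguments `docker exec` itself expects.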
def has_container_with_state(containers, state):
states = {
'running': lambda c: c.is_running,
'stopped': lambda c: not c.is_running,
'paused': lambda c: c.is_paused,
'restarting': lambda c: c.is_restarting,
}
for container in containers:
if state not in states:
raise UserError("Invalid state: %s" % state)
if states[state](container):
return True
def filter_services(filt, services, project):
def should_include(service):
for f in filt:
if f == 'status':
state = filt[f]
containers = project.containers([service.name], stopped=True)
if not has_container_with_state(containers, state):
return False
elif f == 'source':
source = filt[f]
if source == 'image' or source == 'build':
if source not in service.options:
return False
else:
raise UserError("Invalid value for source filter: %s" % source)
else:
raise UserError("Invalid filter: %s" % f)
return True
return filter(should_include, services)
def build_filter(arg):
filt = {}
if arg is not None:
if '=' not in arg:
raise UserError("Arguments to --filter should be in form KEY=VAL")
key, val = arg.split('=', 1)
filt[key] = val
return filt
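# Illustrative example (added for clarity, not in the original module):
# build_filter('status=running') returns {'status': 'running'} and
# build_filter(None) returns {}; filter_services() then interprets the
# 'status' and 'source' keys of that mapping.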
def warn_for_swarm_mode(client):
info = client.info()
if info.get('Swarm', {}).get('LocalNodeState') == 'active':
if info.get('ServerVersion', '').startswith('ucp'):
# UCP does multi-node scheduling with traditional Compose files.
return
log.warning(
"The Docker Engine you're using is running in swarm mode.\n\n"
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
"All containers will be scheduled on the current node.\n\n"
"To deploy your application across the swarm, "
"use `docker stack deploy`.\n"
)
|
emanuelschuetze/OpenSlides
|
refs/heads/master
|
openslides/users/urls.py
|
2
|
from django.conf.urls import url
from . import views
urlpatterns = [
# Auth
url(r"^login/$", views.UserLoginView.as_view(), name="user_login"),
url(r"^logout/$", views.UserLogoutView.as_view(), name="user_logout"),
url(r"^whoami/$", views.WhoAmIView.as_view(), name="user_whoami"),
url(r"^setpassword/$", views.SetPasswordView.as_view(), name="user_setpassword"),
url(
r"^reset-password/$",
views.PasswordResetView.as_view(),
name="user_reset_password",
),
url(
r"^reset-password-confirm/$",
views.PasswordResetConfirmView.as_view(),
name="password_reset_confirm",
),
]
|
andymckay/django
|
refs/heads/master
|
django/contrib/gis/db/backends/base.py
|
79
|
"""
Base/mixin classes for the spatial backend database operations and the
`SpatialRefSys` model used by the backend.
"""
import re
from django.contrib.gis import gdal
class BaseSpatialOperations(object):
"""
    Base class for the spatial database backend operations. Each spatial
    database backend subclasses it and declares the features and functions
    it supports.
"""
distance_functions = {}
geometry_functions = {}
geometry_operators = {}
geography_operators = {}
geography_functions = {}
gis_terms = {}
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geography type?
geography = False
area = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
bounding_circle = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
collect = False
extent = False
extent3d = False
make_line = False
unionagg = False
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
if isinstance(name, unicode):
name = name.encode('ascii')
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
raise NotImplementedError
# Spatial SQL Construction
def spatial_aggregate_sql(self, agg):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_lookup_sql(self, lvalue, lookup_type, value, field):
raise NotImplementedError
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError
def spatial_ref_sys(self):
raise NotImplementedError
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
    SpatialRefSys objects to reduce redundant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
try:
self._srs = gdal.SpatialReference(self.wkt)
return self.srs
except Exception, msg:
pass
try:
self._srs = gdal.SpatialReference(self.proj4text)
return self.srs
except Exception, msg:
pass
raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening).
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
if m: return (float(m.group('major')), float(m.group('flattening')))
else: return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDAL:
return self.srs.geographic
else:
return self.wkt.startswith('GEOGCS')
@property
def linear_name(self):
"Returns the linear units name."
if gdal.HAS_GDAL:
return self.srs.linear_name
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def linear_units(self):
"Returns the linear units."
if gdal.HAS_GDAL:
return self.srs.linear_units
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def angular_name(self):
"Returns the name of the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_name
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def angular_units(self):
"Returns the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_units
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def units(self):
"Returns a tuple of the units and the name."
if self.projected or self.local:
return (self.linear_units, self.linear_name)
elif self.geographic:
return (self.angular_units, self.angular_name)
else:
return (None, None)
@classmethod
def get_units(cls, wkt):
"""
Class method used by GeometryField on initialization to
        retrieve the units for the given WKT, without having to use
any of the database fields.
"""
if gdal.HAS_GDAL:
return gdal.SpatialReference(wkt).units
else:
m = cls.units_regex.match(wkt)
return m.group('unit'), m.group('unit_name')
@classmethod
def get_spheroid(cls, wkt, string=True):
"""
Class method used by GeometryField on initialization to
retrieve the `SPHEROID[..]` parameters from the given WKT.
"""
if gdal.HAS_GDAL:
srs = gdal.SpatialReference(wkt)
sphere_params = srs.ellipsoid
sphere_name = srs['spheroid']
else:
m = cls.spheroid_regex.match(wkt)
if m:
sphere_params = (float(m.group('major')), float(m.group('flattening')))
sphere_name = m.group('name')
else:
return None
if not string:
return sphere_name, sphere_params
else:
# `string` parameter used to place in format acceptable by PostGIS
if len(sphere_params) == 3:
radius, flattening = sphere_params[0], sphere_params[2]
else:
radius, flattening = sphere_params
return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
def __unicode__(self):
"""
Returns the string representation. If GDAL is installed,
it will be 'pretty' OGC WKT.
"""
try:
return unicode(self.srs)
except:
return unicode(self.wkt)
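# Usage sketch (added for clarity, not in the original module; WKT abbreviated):
# for a geographic WKT such as 'GEOGCS["WGS 84", ..., UNIT["degree", 0.01745...]]',
# SpatialRefSysMixin.get_units(wkt) returns the unit value and name -- taken from
# GDAL's SpatialReference when GDAL is installed, otherwise parsed with
# units_regex -- and SpatialRefSysMixin.get_spheroid(wkt) returns a
# 'SPHEROID["WGS 84",6378137.0,298.257223563]'-style string for PostGIS
# (or a (name, params) tuple when string=False).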
|
behzadnouri/scipy
|
refs/heads/master
|
scipy/special/tests/test_precompute_utils.py
|
17
|
from __future__ import division, print_function, absolute_import
from scipy._lib.six import with_metaclass
from numpy.testing import dec
from scipy.special._testutils import MissingModule, check_version, DecoratorMeta
from scipy.special._mptestutils import mp_assert_allclose
from scipy.special._precompute.utils import lagrange_inversion
try:
import sympy
except ImportError:
sympy = MissingModule('sympy')
try:
import mpmath as mp
except ImportError:
try:
from sympy import mpmath as mp
except ImportError:
mp = MissingModule('mpmath')
class TestInversion(with_metaclass(DecoratorMeta, object)):
decorators = [(dec.slow, None),
(check_version, (sympy, '0.7')),
(check_version, (mp, '0.19'))]
def test_log(self):
with mp.workdps(30):
logcoeffs = mp.taylor(lambda x: mp.log(1 + x), 0, 10)
expcoeffs = mp.taylor(lambda x: mp.exp(x) - 1, 0, 10)
invlogcoeffs = lagrange_inversion(logcoeffs)
mp_assert_allclose(invlogcoeffs, expcoeffs)
def test_sin(self):
with mp.workdps(30):
sincoeffs = mp.taylor(mp.sin, 0, 10)
asincoeffs = mp.taylor(mp.asin, 0, 10)
invsincoeffs = lagrange_inversion(sincoeffs)
mp_assert_allclose(invsincoeffs, asincoeffs, atol=1e-30)
|
vegetableman/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
|
124
|
# Copyright (C) 2010 Apple Inc. All rights reserved.
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Unit tests for watchlist.py.'''
import unittest2 as unittest
import watchlist
class MockErrorHandler(object):
def __init__(self, handle_style_error):
self.turned_off_filtering = False
self._handle_style_error = handle_style_error
def turn_off_line_filtering(self):
self.turned_off_filtering = True
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
return True
class WatchListTest(unittest.TestCase):
def test_basic_error_message(self):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
mock_error_handler.had_error = True
self.assertEqual(0, line_number)
self.assertEqual('watchlist/general', category)
error_handler = MockErrorHandler(handle_style_error)
error_handler.had_error = False
checker = watchlist.WatchListChecker('watchlist', error_handler)
checker.check(['{"DEFINTIONS": {}}'])
self.assertTrue(error_handler.had_error)
self.assertTrue(error_handler.turned_off_filtering)
|
PaulStoffregen/Arduino-1.6.5-Teensyduino
|
refs/heads/master
|
arduino-core/src/processing/app/i18n/python/requests/packages/charade/constants.py
|
3007
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
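# Descriptive note (added for clarity, not in the original module): these
# constants are shared by the charade probers and coding state machines --
# eDetecting/eFoundIt/eNotMe are prober result states, eStart/eError/eItsMe
# are state-machine states, and SHORTCUT_THRESHOLD is the confidence above
# which a prober may stop scanning early and report a match.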
|
dutradda/myreco
|
refs/heads/master
|
tests/integration/engine_objects/test_engine_objects_integration.py
|
1
|
# MIT License
# Copyright (c) 2016 Diogo Dutra <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import tempfile
from datetime import datetime
from time import sleep
from unittest import mock
from swaggerit.models._base import _all_models
from tests.integration.fixtures import TopSellerArrayTest
import pytest
import ujson
@pytest.fixture
def init_db(models, session, api):
user = {
'name': 'test',
'email': 'test',
'password': 'test',
'admin': True
}
session.loop.run_until_complete(models['users'].insert(session, user))
tmp = tempfile.TemporaryDirectory()
store = {
'name': 'test',
'country': 'test',
'configuration': {}
}
session.loop.run_until_complete(models['stores'].insert(session, store))
item_type = {
'name': 'products',
'schema': {
'type': 'object',
'id_names': ['sku'],
'properties': {'sku': {'type': 'string'}}
},
'stores': [{'id': 1}]
}
session.loop.run_until_complete(models['item_types'].insert(session, item_type))
strategy = {
'name': 'test',
'class_module': 'tests.integration.fixtures',
'class_name': 'EngineStrategyTest'
}
session.loop.run_until_complete(models['engine_strategies'].insert(session, strategy))
engine_object = {
'name': 'Top Seller Object',
'type': 'top_seller_array',
'configuration': {'days_interval': 7},
'store_id': 1,
'item_type_id': 1,
'strategy_id': 1
}
session.loop.run_until_complete(models['engine_objects'].insert(session, engine_object))
yield tmp.name
tmp.cleanup()
_all_models.pop('store_items_products_1', None)
class TestEngineObjectsModelPost(object):
async def test_post_without_body(self, init_db, client, headers, headers_without_content_type):
client = await client
resp = await client.post('/engine_objects/', headers=headers)
assert resp.status == 400
assert (await resp.json()) == {'message': 'Request body is missing'}
async def test_post_with_invalid_body(self, init_db, client, headers, headers_without_content_type):
client = await client
resp = await client.post('/engine_objects/', headers=headers, data='[{}]')
assert resp.status == 400
assert (await resp.json()) == {
'message': "'name' is a required property. "\
"Failed validating instance['0'] for schema['items']['required']",
'schema': {
'type': 'object',
'additionalProperties': False,
'required': ['name', 'type', 'configuration', 'strategy_id', 'item_type_id', 'store_id'],
'properties': {
'name': {'type': 'string'},
'type': {'type': 'string'},
'strategy_id': {'type': 'integer'},
'item_type_id': {'type': 'integer'},
'store_id': {'type': 'integer'},
'configuration': {}
}
}
}
async def test_post(self, init_db, client, headers, headers_without_content_type):
client = await client
body = [{
'name': 'Top Seller Object Test',
'type': 'top_seller_array',
'configuration': {'days_interval': 7},
'store_id': 1,
'item_type_id': 1,
'strategy_id': 1
}]
resp = await client.post('/engine_objects/', headers=headers, data=ujson.dumps(body))
resp_json = (await resp.json())
body[0]['id'] = 2
body[0]['store'] = resp_json[0]['store']
body[0]['strategy'] = resp_json[0]['strategy']
body[0]['item_type'] = resp_json[0]['item_type']
assert resp.status == 201
assert resp_json == body
async def test_post_with_invalid_grant(self, client):
client = await client
body = [{
'name': 'Top Seller Object Test',
'type': 'top_seller_array',
'configuration': {'days_interval': 7},
'store_id': 1,
'item_type_id': 1,
'strategy_id': 1
}]
resp = await client.post('/engine_objects/', headers={'Authorization': 'invalid'}, data=ujson.dumps(body))
assert resp.status == 401
assert (await resp.json()) == {'message': 'Invalid authorization'}
class TestEngineObjectsModelGet(object):
async def test_get_not_found(self, init_db, headers_without_content_type, client):
client = await client
resp = await client.get(
'/engine_objects/?store_id=2&item_type_id=1&strategy_id=1',
headers=headers_without_content_type
)
assert resp.status == 404
async def test_get_invalid_with_body(self, init_db, headers, client):
client = await client
resp = await client.get(
'/engine_objects/?store_id=1&item_type_id=1&strategy_id=1',
headers=headers,
data='{}'
)
assert resp.status == 400
assert await resp.json() == {'message': 'Request body is not acceptable'}
async def test_get_valid(self, init_db, headers, headers_without_content_type, client):
body = [{
'name': 'Top Seller Object',
'type': 'top_seller_array',
'configuration': {"days_interval": 7},
'store_id': 1,
'item_type_id': 1,
'strategy_id': 1,
'id': 1,
'store': {
'id': 1,
'name': 'test',
'country': 'test',
'configuration': {}
},
'item_type': {
'id': 1,
'store_items_class': None,
'stores': [{
'configuration': {},
'country': 'test',
'id': 1,
'name': 'test'
}],
'name': 'products',
'schema': {
'type': 'object',
'id_names': ['sku'],
'properties': {'sku': {'type': 'string'}}
},
'available_filters': [{'name': 'sku', 'schema': {'type': 'string'}}]
},
'strategy': {
'id': 1,
'name': 'test',
'class_module': 'tests.integration.fixtures',
'class_name': 'EngineStrategyTest',
'object_types': ['top_seller_array']
}
}]
client = await client
resp = await client.get(
'/engine_objects/?store_id=1&item_type_id=1&strategy_id=1',
headers=headers_without_content_type
)
assert resp.status == 200
assert await resp.json() == body
class TestEngineObjectsModelUriTemplatePatch(object):
async def test_patch_without_body(self, init_db, client, headers, headers_without_content_type):
client = await client
resp = await client.patch('/engine_objects/1/', headers=headers, data='')
assert resp.status == 400
assert (await resp.json()) == {'message': 'Request body is missing'}
async def test_patch_with_invalid_body(self, init_db, client, headers, headers_without_content_type):
client = await client
resp = await client.patch('/engine_objects/1/', headers=headers, data='{}')
assert resp.status == 400
assert (await resp.json()) == {
'message': '{} does not have enough properties. '\
"Failed validating instance for schema['minProperties']",
'schema': {
'type': 'object',
'additionalProperties': False,
'minProperties': 1,
'properties': {
'name': {'type': 'string'},
'configuration': {}
}
}
}
async def test_patch_with_invalid_config(self, init_db, client, headers, headers_without_content_type):
client = await client
body = {
'configuration': {}
}
resp = await client.patch('/engine_objects/1/', headers=headers, data=ujson.dumps(body))
assert resp.status == 400
print(ujson.dumps(await resp.json(), indent=4))
assert (await resp.json()) == {
'message': "'days_interval' is a required property. "\
"Failed validating instance for schema['required']",
'schema': {
'type': 'object',
'required': ['days_interval'],
'additionalProperties': False,
'properties': {
'days_interval': {'type': 'integer'}
}
}
}
async def test_patch_not_found(self, init_db, client, headers, headers_without_content_type):
client = await client
body = {
'name': 'Top Seller Object Test'
}
resp = await client.patch('/engine_objects/2/', headers=headers, data=ujson.dumps(body))
assert resp.status == 404
async def test_patch(self, init_db, client, headers, headers_without_content_type):
client = await client
body = [{
'name': 'Top Seller Object Test',
'type': 'top_seller_array',
'configuration': {'days_interval': 7},
'store_id': 1,
'item_type_id': 1,
'strategy_id': 1
}]
resp = await client.post('/engine_objects/', headers=headers, data=ujson.dumps(body))
obj = (await resp.json())[0]
body = {
'name': 'test2'
}
resp = await client.patch('/engine_objects/2/', headers=headers, data=ujson.dumps(body))
obj['name'] = 'test2'
assert resp.status == 200
assert (await resp.json()) == obj
class TestEngineObjectsModelUriTemplateGet(object):
async def test_get_with_body(self, init_db, headers, client):
client = await client
resp = await client.get('/engine_objects/1/', headers=headers, data='{}')
assert resp.status == 400
assert await resp.json() == {'message': 'Request body is not acceptable'}
async def test_get_not_found(self, init_db, headers_without_content_type, client):
client = await client
resp = await client.get('/engine_objects/2/', headers=headers_without_content_type)
assert resp.status == 404
async def test_get(self, init_db, headers, headers_without_content_type, client):
client = await client
resp = await client.get('/engine_objects/1/', headers=headers_without_content_type)
body = {
'name': 'Top Seller Object',
'type': 'top_seller_array',
'configuration': {"days_interval": 7},
'store_id': 1,
'item_type_id': 1,
'strategy_id': 1,
'id': 1,
'store': {
'id': 1,
'name': 'test',
'country': 'test',
'configuration': {}
},
'item_type': {
'id': 1,
'store_items_class': None,
'stores': [{
'configuration': {},
'country': 'test',
'id': 1,
'name': 'test'
}],
'name': 'products',
'schema': {
'type': 'object',
'id_names': ['sku'],
'properties': {'sku': {'type': 'string'}}
},
'available_filters': [{'name': 'sku', 'schema': {'type': 'string'}}]
},
'strategy': {
'id': 1,
'name': 'test',
'class_module': 'tests.integration.fixtures',
'class_name': 'EngineStrategyTest',
'object_types': ['top_seller_array']
}
}
assert resp.status == 200
assert await resp.json() == body
class TestEngineObjectsModelUriTemplateDelete(object):
async def test_delete_with_body(self, init_db, client, headers):
client = await client
resp = await client.delete('/engine_objects/1/', headers=headers, data='{}')
assert resp.status == 400
assert (await resp.json()) == {'message': 'Request body is not acceptable'}
async def test_delete_valid(self, init_db, client, headers, headers_without_content_type):
client = await client
resp = await client.get('/engine_objects/1/', headers=headers_without_content_type)
assert resp.status == 200
resp = await client.delete('/engine_objects/1/', headers=headers_without_content_type)
assert resp.status == 204
resp = await client.get('/engine_objects/1/', headers=headers_without_content_type)
assert resp.status == 404
def datetime_mock():
mock_ = mock.MagicMock()
mock_.now.return_value = datetime(1900, 1, 1)
return mock_
async def _wait_job_finish(client, headers_without_content_type, job_name='export'):
sleep(0.05)
while True:
resp = await client.get(
'/engine_objects/1/{}?job_hash=6342e10bd7dca3240c698aa79c98362e'.format(job_name),
headers=headers_without_content_type)
if (await resp.json())['status'] != 'running':
break
return resp
def set_patches(monkeypatch):
monkeypatch.setattr('swaggerit.models.orm._jobs_meta.random.getrandbits',
mock.MagicMock(return_value=131940827655846590526331314439483569710))
monkeypatch.setattr('swaggerit.models.orm._jobs_meta.datetime', datetime_mock())
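# Note (added for clarity, not in the original tests): set_patches pins the
# random bits used for job hashes and the datetime used for job timestamps,
# which is why the tests below can assert the literal job hash
# '6342e10bd7dca3240c698aa79c98362e' and the fixed '1900-01-01 00:00' time_info.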
class TestEngineObjectsModelsDataImporter(object):
async def test_importer_post(self, init_db, headers, headers_without_content_type, client, monkeypatch):
set_patches(monkeypatch)
client = await client
resp = await client.post('/engine_objects/1/import_data', headers=headers_without_content_type)
assert resp.status == 201
assert await resp.json() == {'job_hash': '6342e10bd7dca3240c698aa79c98362e'}
await _wait_job_finish(client, headers_without_content_type, 'import_data')
async def test_importer_get_running(self, init_db, headers_without_content_type, client, monkeypatch):
set_patches(monkeypatch)
client = await client
await client.post('/engine_objects/1/import_data', headers=headers_without_content_type)
resp = await client.get('/engine_objects/1/import_data?job_hash=6342e10bd7dca3240c698aa79c98362e',
headers=headers_without_content_type)
assert await resp.json() == {'status': 'running'}
await _wait_job_finish(client, headers_without_content_type, 'import_data')
async def test_importer_get_done(self, init_db, headers_without_content_type, client, monkeypatch):
set_patches(monkeypatch)
client = await client
await client.post('/engine_objects/1/import_data', headers=headers_without_content_type)
resp = await _wait_job_finish(client, headers_without_content_type, 'import_data')
assert await resp.json() == {
'status': 'done',
'result': {'lines_count': 3},
'time_info': {
'elapsed': '0:00',
'start': '1900-01-01 00:00',
'end': '1900-01-01 00:00'
}
}
async def test_importer_get_with_error(self, init_db, headers_without_content_type, client, monkeypatch):
set_patches(monkeypatch)
monkeypatch.setattr('tests.integration.fixtures.TopSellerArrayTest.get_data',
mock.MagicMock(side_effect=Exception('testing')))
client = await client
await client.post('/engine_objects/1/import_data', headers=headers_without_content_type)
resp = await _wait_job_finish(client, headers_without_content_type, 'import_data')
assert await resp.json() == {
'status': 'error',
'result': {'message': 'testing', 'name': 'Exception'},
'time_info': {
'elapsed': '0:00',
'start': '1900-01-01 00:00',
'end': '1900-01-01 00:00'
}
}
async def _post_products(client, headers, headers_without_content_type, products=[{'sku': 'test'}]):
resp = await client.post('/item_types/1/items?store_id=1',
data=ujson.dumps(products), headers=headers)
resp = await client.post('/item_types/1/update_filters?store_id=1',
headers=headers_without_content_type)
sleep(0.05)
while True:
resp = await client.get(
'/item_types/1/update_filters?store_id=1&job_hash=6342e10bd7dca3240c698aa79c98362e',
headers=headers_without_content_type)
if (await resp.json())['status'] != 'running':
break
return resp
def set_readers_builders_patch(monkeypatch, values=None):
if values is None:
values = [[ujson.dumps({'value': 1, 'item_key': 'test'}).encode()]]
readers_builder = values
mock_ = mock.MagicMock()
mock_.return_value = readers_builder
monkeypatch.setattr(
'myreco.engine_objects.object_base.EngineObjectBase._build_csv_readers',
mock_
)
class TestEngineObjectsModelsObjectsExporter(object):
async def test_exporter_post(self, init_db, headers_without_content_type, headers, client, monkeypatch):
set_patches(monkeypatch)
set_readers_builders_patch(monkeypatch)
client = await client
await _post_products(client, headers, headers_without_content_type)
resp = await client.post('/engine_objects/1/export', headers=headers_without_content_type)
assert await resp.json() == {'job_hash': '6342e10bd7dca3240c698aa79c98362e'}
await _wait_job_finish(client, headers_without_content_type)
async def test_exporter_get_running(self, init_db, headers_without_content_type, headers, client, monkeypatch, loop):
set_patches(monkeypatch)
prods = [ujson.dumps({'value': i, 'item_key': 'test{}'.format(i)}).encode() for i in range(100)]
set_readers_builders_patch(monkeypatch, [[b'\n'.join(prods)]])
client = await client
products = [{'sku': 'test{}'.format(i)} for i in range(10)]
await _post_products(client, headers, headers_without_content_type, products)
await client.post('/engine_objects/1/export', headers=headers_without_content_type)
resp = await client.get(
'/engine_objects/1/export?job_hash=6342e10bd7dca3240c698aa79c98362e', headers=headers_without_content_type)
assert await resp.json() == {'status': 'running'}
await _wait_job_finish(client, headers_without_content_type)
async def test_exporter_get_done(self, init_db, headers_without_content_type, headers, client, monkeypatch):
set_patches(monkeypatch)
client = await client
await _post_products(client, headers, headers_without_content_type)
set_readers_builders_patch(monkeypatch)
await client.post('/engine_objects/1/export', headers=headers_without_content_type)
resp = await _wait_job_finish(client, headers_without_content_type)
assert await resp.json() == {
'status': 'done',
'result': {'length': 1, 'max_sells': 1, 'min_sells': 1},
'time_info': {
'elapsed': '0:00',
'start': '1900-01-01 00:00',
'end': '1900-01-01 00:00'
}
}
async def test_exporter_get_with_error(
self, init_db, headers_without_content_type, headers, client, monkeypatch):
set_patches(monkeypatch)
client = await client
await _post_products(client, headers, headers_without_content_type)
set_readers_builders_patch(monkeypatch, [])
await client.post('/engine_objects/1/export', headers=headers_without_content_type)
resp = await _wait_job_finish(client, headers_without_content_type)
assert await resp.json() == {
'status': 'error',
'result': {
'message': "No data found for engine object 'Top Seller Object'",
'name': 'EngineError'
},
'time_info': {
'elapsed': '0:00',
'start': '1900-01-01 00:00',
'end': '1900-01-01 00:00'
}
}
def CoroMock():
coro = mock.MagicMock(name="CoroutineResult")
corofunc = mock.MagicMock(name="CoroutineFunction", side_effect=asyncio.coroutine(coro))
corofunc.coro = coro
return corofunc
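# Note (added for clarity, not in the original tests): CoroMock builds a mock
# coroutine function -- calling it returns an awaitable, and the result can be
# configured through the attached mock, e.g.
#   get_data = CoroMock(); get_data.coro.return_value = {}
# which is convenient when patching async APIs with monkeypatch.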
def set_data_importer_patch(monkeypatch, mock_=None):
if mock_ is None:
mock_ = mock.MagicMock()
monkeypatch.setattr('tests.integration.fixtures.TopSellerArrayTest.get_data', mock_)
return mock_
class TestEngineObjectsModelsObjectsExporterWithImport(object):
async def test_exporter_post_with_import(self, init_db, headers, headers_without_content_type, client, monkeypatch):
set_patches(monkeypatch)
client = await client
await _post_products(client, headers, headers_without_content_type)
set_readers_builders_patch(monkeypatch)
get_data_patch = set_data_importer_patch(monkeypatch)
get_data_patch.return_value = {}
resp = await client.post('/engine_objects/1/export?import_data=true',
headers=headers_without_content_type)
hash_ = await resp.json()
await _wait_job_finish(client, headers_without_content_type)
called = bool(TopSellerArrayTest.get_data.called)
TopSellerArrayTest.get_data.reset_mock()
assert hash_ == {'job_hash': '6342e10bd7dca3240c698aa79c98362e'}
assert called
async def test_exporter_get_running_with_import(self, init_db, headers, headers_without_content_type, client, monkeypatch):
set_patches(monkeypatch)
client = await client
def func(x, y, z):
sleep(1)
return {}
await _post_products(client, headers, headers_without_content_type)
set_readers_builders_patch(monkeypatch)
set_data_importer_patch(monkeypatch, func)
await client.post('/engine_objects/1/export?import_data=true',
headers=headers_without_content_type)
resp = await client.get(
'/engine_objects/1/export?job_hash=6342e10bd7dca3240c698aa79c98362e',
headers=headers_without_content_type)
assert await resp.json() == {'status': 'running'}
await _wait_job_finish(client, headers_without_content_type)
async def test_exporter_get_done_with_import(self, init_db, headers, headers_without_content_type, client, monkeypatch):
set_patches(monkeypatch)
client = await client
await _post_products(client, headers, headers_without_content_type)
set_readers_builders_patch(monkeypatch)
await client.post('/engine_objects/1/export?import_data=true',
headers=headers_without_content_type)
await _wait_job_finish(client, headers_without_content_type)
resp = await client.get(
'/engine_objects/1/export?job_hash=6342e10bd7dca3240c698aa79c98362e',
headers=headers_without_content_type)
assert await resp.json() == {
'status': 'done',
'result': {
'importer': {'lines_count': 3},
'exporter': {
'length': 1,
'max_sells': 1,
'min_sells': 1
}
},
'time_info': {
'elapsed': '0:00',
'start': '1900-01-01 00:00',
'end': '1900-01-01 00:00'
}
}
async def test_exporter_get_with_error_in_import_with_import(
self, init_db, headers, headers_without_content_type, client, monkeypatch):
set_patches(monkeypatch)
client = await client
await _post_products(client, headers, headers_without_content_type)
get_data_patch = set_data_importer_patch(monkeypatch)
get_data_patch.side_effect = Exception('testing')
await client.post('/engine_objects/1/export?import_data=true', headers=headers_without_content_type)
await _wait_job_finish(client, headers_without_content_type)
resp = await client.get(
'/engine_objects/1/export?job_hash=6342e10bd7dca3240c698aa79c98362e', headers=headers_without_content_type)
assert await resp.json() == {
'status': 'error',
'result': {'message': 'testing', 'name': 'Exception'},
'time_info': {
'elapsed': '0:00',
'start': '1900-01-01 00:00',
'end': '1900-01-01 00:00'
}
}
async def test_exporter_get_with_error_in_export_with_import(
self, init_db, headers, headers_without_content_type, client, monkeypatch):
set_patches(monkeypatch)
client = await client
await _post_products(client, headers, headers_without_content_type)
set_readers_builders_patch(monkeypatch, [])
await client.post('/engine_objects/1/export?import_data=true', headers=headers_without_content_type)
await _wait_job_finish(client, headers_without_content_type)
resp = await client.get(
'/engine_objects/1/export?job_hash=6342e10bd7dca3240c698aa79c98362e', headers=headers_without_content_type)
assert await resp.json() == {
'status': 'error',
'result': {
'message': "No data found for engine object 'Top Seller Object'",
'name': 'EngineError'
},
'time_info': {
'elapsed': '0:00',
'start': '1900-01-01 00:00',
'end': '1900-01-01 00:00'
}
}
|
bloer/bgexplorer
|
refs/heads/master
|
docs/conf.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# bgexplorer documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 19 09:19:30 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'bgexplorer'
copyright = '2017, B. Loer'
author = 'B. Loer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1dev0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'bgexplorerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'bgexplorer.tex', 'bgexplorer Documentation',
'B. Loer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bgexplorer', 'bgexplorer Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'bgexplorer', 'bgexplorer Documentation',
author, 'bgexplorer', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
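#
# A commented-out sketch of the equivalent mapping using a named key and an
# explicit Python 3 target (illustrative only, not a project setting):
#
# intersphinx_mapping = {
#     'python': ('https://docs.python.org/3', None),
# }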
|
marty331/jakesclock
|
refs/heads/master
|
flask/lib/python2.7/site-packages/setuptools/command/register.py
|
986
|
import distutils.command.register as orig
class register(orig.register):
__doc__ = orig.register.__doc__
def run(self):
# Make sure that we are using valid current name/version info
self.run_command('egg_info')
orig.register.run(self)
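# A minimal usage sketch (assumptions: run from a project directory with a
# setup.py; the distribution attributes are placeholders). The command is
# normally reached via "python setup.py register", which is roughly
# equivalent to:
#
#     from setuptools import Distribution
#
#     dist = Distribution({'name': 'example-project', 'version': '0.1'})
#     dist.run_command('register')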
|
SaptakS/open-event-orga-server
|
refs/heads/master
|
migrations/versions/79e54124b415_.py
|
10
|
"""empty message
Revision ID: 79e54124b415
Revises: affad812c947
Create Date: 2016-08-29 12:08:14.717319
"""
# revision identifiers, used by Alembic.
revision = '79e54124b415'
down_revision = 'affad812c947'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('speaker', sa.Column('icon', sa.String(), nullable=True))
op.add_column('speaker', sa.Column('small', sa.String(), nullable=True))
op.add_column('speaker', sa.Column('thumbnail', sa.String(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('speaker', 'thumbnail')
op.drop_column('speaker', 'small')
op.drop_column('speaker', 'icon')
### end Alembic commands ###
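# A usage sketch (assumes a configured alembic.ini pointing at the target
# database); these are standard Alembic CLI invocations, shown as comments:
#
#     alembic upgrade 79e54124b415      # apply this revision (adds the columns)
#     alembic downgrade affad812c947    # revert to the previous revision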
|
nion-software/nionswift
|
refs/heads/master
|
nion/swift/model/DocumentModel.py
|
1
|
from __future__ import annotations
# standard libraries
import abc
import asyncio
import collections
import contextlib
import copy
import datetime
import functools
import gettext
import threading
import time
import typing
import uuid
import weakref
# local libraries
from nion.data import DataAndMetadata
from nion.swift.model import Cache
from nion.swift.model import Changes
from nion.swift.model import Connection
from nion.swift.model import Connector
from nion.swift.model import DataGroup
from nion.swift.model import DataItem
from nion.swift.model import DataStructure
from nion.swift.model import DisplayItem
from nion.swift.model import Graphics
from nion.swift.model import Observer
from nion.swift.model import PlugInManager
from nion.swift.model import Persistence
from nion.swift.model import Processing
from nion.swift.model import Project
from nion.swift.model import Symbolic
from nion.utils import Event
from nion.utils import Geometry
from nion.utils import Observable
from nion.utils import Recorder
from nion.utils import ReferenceCounting
from nion.utils import Registry
from nion.utils import ThreadPool
_ = gettext.gettext
Processing.init()
class HardwareSourceManager:
# defines the methods of the hardware source manager that are called from this file.
def get_hardware_source_for_hardware_source_id(self, hardware_source_id: str) -> typing.Any: ...
def register_document_model(self, document_model: DocumentModel) -> typing.Any: ...
def hardware_source_manager() -> HardwareSourceManager:
return typing.cast(HardwareSourceManager, Registry.get_component("hardware_source_manager"))
def save_item_order(items: typing.List[Persistence.PersistentObject]) -> typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]:
return [item.item_specifier for item in items]
def restore_item_order(project: Project.Project, uuid_order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]) -> typing.List[Persistence.PersistentObject]:
items = list()
for item_specifier in uuid_order:
items.append(project.resolve_item_specifier(item_specifier))
return items
def insert_item_order(uuid_order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]], index: int, item: Persistence.PersistentObject) -> None:
uuid_order.insert(index, item.item_specifier)
class ComputationMerge:
def __init__(self, computation: Symbolic.Computation, fn: typing.Optional[typing.Callable[[], None]] = None, closeables: typing.Optional[typing.List] = None):
self.computation = computation
self.fn = fn
self.closeables = closeables or list()
def close(self) -> None:
for closeable in self.closeables:
closeable.close()
def exec(self) -> None:
if callable(self.fn):
self.fn()
class ComputationQueueItem:
def __init__(self, *, computation=None):
self.computation = computation
self.valid = True
def recompute(self) -> typing.Optional[ComputationMerge]:
# evaluate the computation in a thread safe manner
# returns a list of functions that must be called on the main thread to finish the recompute action
# threadsafe
pending_data_item_merge: typing.Optional[ComputationMerge] = None
data_item = None
computation = self.computation
if computation.expression:
data_item = computation.get_output("target")
if computation and computation.needs_update:
try:
api = PlugInManager.api_broker_fn("~1.0", None)
if not data_item:
start_time = time.perf_counter()
compute_obj, error_text = computation.evaluate(api)
eval_time = time.perf_counter() - start_time
if error_text and computation.error_text != error_text:
def update_error_text():
computation.error_text = error_text
pending_data_item_merge = ComputationMerge(computation, update_error_text)
return pending_data_item_merge
throttle_time = max(DocumentModel.computation_min_period - (time.perf_counter() - computation.last_evaluate_data_time), 0)
time.sleep(max(throttle_time, min(eval_time * DocumentModel.computation_min_factor, 1.0)))
if self.valid and compute_obj: # TODO: race condition for 'valid'
pending_data_item_merge = ComputationMerge(computation, functools.partial(compute_obj.commit))
else:
pending_data_item_merge = ComputationMerge(computation)
else:
start_time = time.perf_counter()
data_item_clone = data_item.clone()
data_item_data_modified = data_item.data_modified or datetime.datetime.min
data_item_clone_recorder = Recorder.Recorder(data_item_clone)
api_data_item = api._new_api_object(data_item_clone)
error_text = computation.evaluate_with_target(api, api_data_item)
eval_time = time.perf_counter() - start_time
throttle_time = max(DocumentModel.computation_min_period - (time.perf_counter() - computation.last_evaluate_data_time), 0)
time.sleep(max(throttle_time, min(eval_time * DocumentModel.computation_min_factor, 1.0)))
if self.valid: # TODO: race condition for 'valid'
def data_item_merge(data_item, data_item_clone, data_item_clone_recorder):
                            # merge the result item clones back into the document. this method is
                            # guaranteed to run periodically and shouldn't do anything too time consuming.
data_item_data_clone_modified = data_item_clone.data_modified or datetime.datetime.min
with data_item.data_item_changes(), data_item.data_source_changes():
if data_item_data_clone_modified > data_item_data_modified:
data_item.set_xdata(api_data_item.data_and_metadata)
data_item_clone_recorder.apply(data_item)
if computation.error_text != error_text:
computation.error_text = error_text
pending_data_item_merge = ComputationMerge(computation, functools.partial(data_item_merge, data_item, data_item_clone, data_item_clone_recorder), [data_item_clone, data_item_clone_recorder])
except Exception as e:
import traceback
traceback.print_exc()
# computation.error_text = _("Unable to compute data")
return pending_data_item_merge
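# A usage sketch for ComputationQueueItem (threading context and names are
# assumptions; the document model drives this internally): recompute() runs on a
# worker thread and returns a ComputationMerge whose exec()/close() must be
# called on the main thread.
#
#     queue_item = ComputationQueueItem(computation=computation)
#     merge = queue_item.recompute()   # worker thread
#     if merge is not None:
#         merge.exec()                 # main thread: commit or record the result
#         merge.close()                # release cloned resources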
class Transaction:
def __init__(self, transaction_manager: "TransactionManager", item, items):
self.__transaction_manager = transaction_manager
self.__item = item
self.__items = items
def close(self):
self.__transaction_manager._close_transaction(self)
self.__items = None
self.__transaction_manager = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
@property
def item(self):
return self.__item
@property
def items(self):
return copy.copy(self.__items)
def replace_items(self, items):
self.__items = items
class TransactionManager:
def __init__(self, document_model: "DocumentModel"):
self.__document_model = document_model
self.__transactions_lock = threading.RLock()
self.__transaction_counts = collections.Counter()
self.__transactions = list()
def close(self):
self.__document_model = None
self.__transaction_counts = None
def is_in_transaction_state(self, item) -> bool:
return self.__transaction_counts[item] > 0
@property
def transaction_count(self):
return len(list(self.__transaction_counts.elements()))
def item_transaction(self, item) -> Transaction:
"""Begin transaction state for item.
        A transaction state exists to prevent writing out to disk, mainly for performance reasons.
        All changes to the object are delayed until the transaction state exits.
        This method is thread safe. A usage sketch follows this method.
"""
items = self.__build_transaction_items(item)
transaction = Transaction(self, item, items)
self.__transactions.append(transaction)
return transaction
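    # A usage sketch for the docstring above (hypothetical caller; the document
    # model normally owns its TransactionManager): the returned Transaction is a
    # context manager, so changes inside the block are batched until exit.
    #
    #     with transaction_manager.item_transaction(data_item):
    #         ...  # mutate data_item; writes are deferred until the block exits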
def _close_transaction(self, transaction):
items = transaction.items
self.__close_transaction_items(items)
self.__transactions.remove(transaction)
def __build_transaction_items(self, item):
items = set()
self.__get_deep_transaction_item_set(item, items)
with self.__transactions_lock:
for item in items:
old_count = self.__transaction_counts[item]
self.__transaction_counts.update({item})
if old_count == 0:
if callable(getattr(item, "_transaction_state_entered", None)):
item._transaction_state_entered()
return items
def __close_transaction_items(self, items):
with self.__transactions_lock:
for item in items:
self.__transaction_counts.subtract({item})
if self.__transaction_counts[item] == 0:
if callable(getattr(item, "_transaction_state_exited", None)):
item._transaction_state_exited()
def __get_deep_transaction_item_set(self, item, items):
        if item and item not in items:
# first the dependent items, also keep track of which items are added
old_items = copy.copy(items)
            if item not in items:
items.add(item)
for dependent in self.__document_model.get_dependent_items(item):
self.__get_deep_transaction_item_set(dependent, items)
if isinstance(item, DisplayItem.DisplayItem):
for display_data_channel in item.display_data_channels:
self.__get_deep_transaction_item_set(display_data_channel, items)
for display_layer in item.display_layers:
self.__get_deep_transaction_item_set(display_layer, items)
for graphic in item.graphics:
self.__get_deep_transaction_item_set(graphic, items)
if isinstance(item, DisplayItem.DisplayDataChannel):
if item.data_item:
self.__get_deep_transaction_item_set(item.data_item, items)
if isinstance(item, DisplayItem.DisplayLayer):
if item.display_data_channel:
self.__get_deep_transaction_item_set(item.display_data_channel, items)
if isinstance(item, DataItem.DataItem):
for display_item in self.__document_model.get_display_items_for_data_item(item):
self.__get_deep_transaction_item_set(display_item, items)
if isinstance(item, DataStructure.DataStructure):
for referenced_object in item.referenced_objects:
self.__get_deep_transaction_item_set(referenced_object, items)
if isinstance(item, Connection.Connection):
self.__get_deep_transaction_item_set(item._source, items)
self.__get_deep_transaction_item_set(item._target, items)
for connection in self.__document_model.connections:
if isinstance(connection, Connection.PropertyConnection) and connection._source in items:
self.__get_deep_transaction_item_set(connection._target, items)
if isinstance(connection, Connection.PropertyConnection) and connection._target in items:
self.__get_deep_transaction_item_set(connection._source, items)
if isinstance(connection, Connection.IntervalListConnection) and connection._source in items:
self.__get_deep_transaction_item_set(connection._target, items)
for implicit_dependency in self.__document_model.implicit_dependencies:
for implicit_item in implicit_dependency.get_dependents(item):
self.__get_deep_transaction_item_set(implicit_item, items)
for item in items - old_items:
if isinstance(item, Graphics.Graphic):
self.__get_deep_transaction_item_set(item.container, items)
def _add_item(self, item):
self._rebuild_transactions()
def _remove_item(self, item):
for transaction in copy.copy(self.__transactions):
if transaction.item == item:
self._close_transaction(transaction)
self._rebuild_transactions()
def _rebuild_transactions(self):
for transaction in self.__transactions:
old_items = transaction.items
new_items = self.__build_transaction_items(transaction.item)
transaction.replace_items(new_items)
self.__close_transaction_items(old_items)
class UndeleteObjectSpecifier(Changes.UndeleteBase):
def __init__(self, document_model: "DocumentModel", computation: Symbolic.Computation, index: int, variable_index: int, object_specifier: typing.Dict):
self.computation_proxy = computation.create_proxy()
self.variable_index = variable_index
self.specifier = object_specifier
self.index = index
def close(self) -> None:
self.computation_proxy.close()
self.computation_proxy = None
def undelete(self, document_model: "DocumentModel") -> None:
computation = self.computation_proxy.item
variable = computation.variables[self.variable_index]
computation.undelete_variable_item(variable.name, self.index, self.specifier)
class UndeleteDataItem(Changes.UndeleteBase):
def __init__(self, document_model: "DocumentModel", data_item: DataItem.DataItem):
container = data_item.container
index = container.data_items.index(data_item)
uuid_order = save_item_order(document_model.data_items)
self.data_item_uuid = data_item.uuid
self.index = index
self.order = uuid_order
def close(self):
pass
def undelete(self, document_model: "DocumentModel") -> None:
document_model.restore_data_item(self.data_item_uuid, self.index)
document_model.restore_items_order("data_items", self.order)
class UndeleteDisplayItemInDataGroup(Changes.UndeleteBase):
def __init__(self, document_model: "DocumentModel", display_item: DisplayItem.DisplayItem, data_group: DataGroup.DataGroup):
self.display_item_proxy = display_item.create_proxy()
self.data_group_proxy = data_group.create_proxy()
self.index = data_group.display_items.index(display_item)
def close(self) -> None:
self.display_item_proxy.close()
self.display_item_proxy = None
self.data_group_proxy.close()
self.data_group_proxy = None
def undelete(self, document_model: "DocumentModel") -> None:
display_item = self.display_item_proxy.item
data_group = self.data_group_proxy.item
data_group.insert_display_item(self.index, display_item)
class UndeleteDisplayItem(Changes.UndeleteBase):
def __init__(self, document_model: "DocumentModel", display_item: DisplayItem.DisplayItem):
container = display_item.container
index = container.display_items.index(display_item)
uuid_order = save_item_order(document_model.display_items)
self.item_dict = display_item.write_to_dict()
self.index = index
self.order = uuid_order
def close(self):
pass
def undelete(self, document_model: "DocumentModel") -> None:
display_item = DisplayItem.DisplayItem()
display_item.begin_reading()
display_item.read_from_dict(self.item_dict)
display_item.finish_reading()
document_model.insert_display_item(self.index, display_item, update_session=False)
document_model.restore_items_order("display_items", self.order)
class ItemsController(abc.ABC):
@abc.abstractmethod
def get_container(self, item: Persistence.PersistentObject) -> typing.Optional[Persistence.PersistentObject]: ...
@abc.abstractmethod
def item_index(self, item: Persistence.PersistentObject) -> int: ...
@abc.abstractmethod
def save_item_order(self) -> typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]: ...
@abc.abstractmethod
def write_to_dict(self, data_structure: Persistence.PersistentObject) -> typing.Dict: ...
@abc.abstractmethod
def restore_from_dict(self, item_dict: typing.Dict, index: int, container: typing.Optional[Persistence.PersistentObject], container_properties: typing.Tuple, order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]) -> None: ...
class DataStructuresController(ItemsController):
def __init__(self, document_model: "DocumentModel"):
self.__document_model = document_model
def get_container(self, item: Persistence.PersistentObject) -> typing.Optional[Persistence.PersistentObject]:
return None
def item_index(self, data_structure: Persistence.PersistentObject) -> int:
return data_structure.container.data_structures.index(data_structure)
def save_item_order(self) -> typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]:
return save_item_order(self.__document_model.data_structures)
def write_to_dict(self, data_structure: Persistence.PersistentObject) -> typing.Dict:
return data_structure.write_to_dict()
def restore_from_dict(self, item_dict: typing.Dict, index: int, container: typing.Optional[Persistence.PersistentObject], container_properties: typing.Tuple, order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]) -> None:
data_structure = DataStructure.DataStructure()
data_structure.begin_reading()
data_structure.read_from_dict(item_dict)
data_structure.finish_reading()
self.__document_model.insert_data_structure(index, data_structure)
self.__document_model.restore_items_order("data_structures", order)
class ComputationsController(ItemsController):
def __init__(self, document_model: "DocumentModel"):
self.__document_model = document_model
def get_container(self, item: Persistence.PersistentObject) -> typing.Optional[Persistence.PersistentObject]:
return None
def item_index(self, computation: Persistence.PersistentObject) -> int:
return computation.container.computations.index(computation)
def save_item_order(self) -> typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]:
return save_item_order(self.__document_model.computations)
def write_to_dict(self, computation: Persistence.PersistentObject) -> typing.Dict:
return computation.write_to_dict()
def restore_from_dict(self, item_dict: typing.Dict, index: int, container: typing.Optional[Persistence.PersistentObject], container_properties: typing.Tuple, order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]) -> None:
computation = Symbolic.Computation()
computation.begin_reading()
computation.read_from_dict(item_dict)
computation.finish_reading()
# if the computation is not resolved, then undelete may require additional items to make it
# resolved. so mark it as needing update here. this is a hack.
computation.needs_update = not computation.is_resolved
self.__document_model.insert_computation(index, computation)
self.__document_model.restore_items_order("computations", order)
class ConnectionsController(ItemsController):
def __init__(self, document_model: "DocumentModel"):
self.__document_model = document_model
def get_container(self, item: Persistence.PersistentObject) -> typing.Optional[Persistence.PersistentObject]:
return None
def item_index(self, connection: Persistence.PersistentObject) -> int:
return connection.container.connections.index(connection)
def save_item_order(self) -> typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]:
return save_item_order(self.__document_model.connections)
def write_to_dict(self, connection: Persistence.PersistentObject) -> typing.Dict:
return connection.write_to_dict()
def restore_from_dict(self, item_dict: typing.Dict, index: int, container: typing.Optional[Persistence.PersistentObject], container_properties: typing.Tuple, order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]) -> None:
item = Connection.connection_factory(item_dict.get)
item.begin_reading()
item.read_from_dict(item_dict)
item.finish_reading()
self.__document_model.insert_connection(index, item)
self.__document_model.restore_items_order("connections", order)
class GraphicsController(ItemsController):
def __init__(self, document_model: "DocumentModel"):
self.__document_model = document_model
def get_container(self, item: Persistence.PersistentObject) -> typing.Optional[Persistence.PersistentObject]:
return item.container
def item_index(self, graphic: Persistence.PersistentObject) -> int:
return graphic.container.graphics.index(graphic)
def save_item_order(self) -> typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]:
return list()
def write_to_dict(self, graphic: Persistence.PersistentObject) -> typing.Dict:
return graphic.write_to_dict()
def restore_from_dict(self, item_dict: typing.Dict, index: int, container: typing.Optional[Persistence.PersistentObject], container_properties: typing.Tuple, order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]) -> None:
graphic = Graphics.factory(item_dict.get)
graphic.begin_reading()
graphic.read_from_dict(item_dict)
graphic.finish_reading()
display_item = typing.cast(DisplayItem.DisplayItem, container)
display_item.insert_graphic(index, graphic)
display_item.restore_properties(container_properties)
class DisplayDataChannelsController(ItemsController):
def __init__(self, document_model: "DocumentModel"):
self.__document_model = document_model
def get_container(self, item: Persistence.PersistentObject) -> typing.Optional[Persistence.PersistentObject]:
return item.container
def item_index(self, display_data_channel: Persistence.PersistentObject) -> int:
return display_data_channel.container.display_data_channels.index(display_data_channel)
def save_item_order(self) -> typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]:
return list()
def write_to_dict(self, display_data_channel: Persistence.PersistentObject) -> typing.Dict:
return display_data_channel.write_to_dict()
def restore_from_dict(self, item_dict: typing.Dict, index: int, container: typing.Optional[Persistence.PersistentObject], container_properties: typing.Tuple, order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]) -> None:
display_data_channel = DisplayItem.display_data_channel_factory(item_dict.get)
display_data_channel.begin_reading()
display_data_channel.read_from_dict(item_dict)
display_data_channel.finish_reading()
display_item = typing.cast(DisplayItem.DisplayItem, container)
display_item.undelete_display_data_channel(index, display_data_channel)
display_item.restore_properties(container_properties)
class DisplayLayersController(ItemsController):
def __init__(self, document_model: "DocumentModel"):
self.__document_model = document_model
def get_container(self, item: Persistence.PersistentObject) -> typing.Optional[Persistence.PersistentObject]:
return item.container
def item_index(self, display_layer: Persistence.PersistentObject) -> int:
return display_layer.container.display_layers.index(display_layer)
def save_item_order(self) -> typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]:
return list()
def write_to_dict(self, display_layer: Persistence.PersistentObject) -> typing.Dict:
return display_layer.write_to_dict()
def restore_from_dict(self, item_dict: typing.Dict, index: int, container: typing.Optional[Persistence.PersistentObject], container_properties: typing.Tuple, order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]) -> None:
display_layer = DisplayItem.display_layer_factory(item_dict.get)
display_layer.begin_reading()
display_layer.read_from_dict(item_dict)
display_layer.finish_reading()
display_item = typing.cast(DisplayItem.DisplayItem, container)
display_item.undelete_display_layer(index, display_layer)
display_item.restore_properties(container_properties)
class UndeleteItem(Changes.UndeleteBase):
def __init__(self, items_controller: ItemsController, item: Persistence.PersistentObject):
self.__items_controller = items_controller
container = self.__items_controller.get_container(item)
index = self.__items_controller.item_index(item)
self.container_item_proxy = container.create_proxy() if container else None
self.container_properties = container.save_properties() if hasattr(container, "save_properties") else dict()
self.item_dict = self.__items_controller.write_to_dict(item)
self.index = index
self.order = self.__items_controller.save_item_order()
def close(self) -> None:
if self.container_item_proxy:
self.container_item_proxy.close()
self.container_item_proxy = None
def undelete(self, document_model: "DocumentModel") -> None:
container = typing.cast(Persistence.PersistentObject, self.container_item_proxy.item) if self.container_item_proxy else None
container_properties = self.container_properties
self.__items_controller.restore_from_dict(self.item_dict, self.index, container, container_properties, self.order)
class AbstractImplicitDependency(abc.ABC):
@abc.abstractmethod
def get_dependents(self, item) -> typing.Sequence: ...
class ImplicitDependency(AbstractImplicitDependency):
def __init__(self, items: typing.Sequence, item):
self.__item = item
self.__items = items
def get_dependents(self, item) -> typing.Sequence:
return [self.__item] if item in self.__items else list()
class MappedItemManager(metaclass=Registry.Singleton):
def __init__(self):
self.__item_map = dict()
self.__item_listener_map = dict()
self.__document_map = dict()
self.changed_event = Event.Event()
def register(self, document_model: DocumentModel, item: Persistence.PersistentObject) -> str:
for r in range(1, 1000000):
r_var = f"r{r:02d}"
            if r_var not in self.__item_map:
self.__item_map[r_var] = item
self.__document_map.setdefault(document_model, set()).add(r_var)
def remove_item():
self.__item_map.pop(r_var)
self.__item_listener_map.pop(r_var).close()
self.__document_map.setdefault(document_model, set()).remove(r_var)
self.changed_event.fire()
self.__item_listener_map[r_var] = item.about_to_be_removed_event.listen(remove_item)
self.changed_event.fire()
return r_var
return str()
def unregister_document(self, document_model: DocumentModel) -> None:
r_vars = self.__document_map.pop(document_model, set())
for r_var in r_vars:
self.__item_map.pop(r_var, None)
self.__item_listener_map.pop(r_var).close()
self.changed_event.fire()
@property
def item_map(self) -> typing.Mapping[str, Persistence.PersistentObject]:
return dict(self.__item_map)
def get_item_r_var(self, item: Persistence.PersistentObject) -> typing.Optional[str]:
for k, v in self.__item_map.items():
if v == item:
return k
return None
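# A usage sketch for MappedItemManager (illustrative only; registration is
# normally reached via DocumentModel.assign_variable_to_display_item below):
#
#     r_var = MappedItemManager().register(document_model, display_item)  # e.g. "r01"
#     item = MappedItemManager().item_map[r_var]
#     assert MappedItemManager().get_item_r_var(display_item) == r_var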
class DocumentModel(Observable.Observable, ReferenceCounting.ReferenceCounted, DataItem.SessionManager):
"""Manages storage and dependencies between data items and other objects.
The document model provides a dispatcher object which will run tasks in a thread pool.
"""
count = 0 # useful for detecting leaks in tests
computation_min_period = 0.0
computation_min_factor = 0.0
def __init__(self, project: Project.Project, *, storage_cache = None):
super().__init__()
self.__class__.count += 1
self.about_to_close_event = Event.Event()
self.data_item_will_be_removed_event = Event.Event() # will be called before the item is deleted
self.dependency_added_event = Event.Event()
self.dependency_removed_event = Event.Event()
self.related_items_changed = Event.Event()
self.computation_updated_event = Event.Event()
self.__computation_thread_pool = ThreadPool.ThreadPool()
self.__project = project
self.uuid = self._project.uuid
project.handle_start_read = self.__start_project_read
project.handle_insert_model_item = self.insert_model_item
project.handle_remove_model_item = self.remove_model_item
project.handle_finish_read = self.__finish_project_read
self.__project_item_inserted_listener = project.item_inserted_event.listen(self.__project_item_inserted)
self.__project_item_removed_listener = project.item_removed_event.listen(self.__project_item_removed)
self.__project_property_changed_listener = project.property_changed_event.listen(self.__project_property_changed)
self.storage_cache = storage_cache
self.__storage_cache = None # needed to deallocate
if not storage_cache:
self.__storage_cache = Cache.DictStorageCache()
self.storage_cache = self.__storage_cache
self.__transaction_manager = TransactionManager(self)
self.__data_structure_listeners = dict()
self.__live_data_items_lock = threading.RLock()
self.__live_data_items = dict()
self.__dependency_tree_lock = threading.RLock()
self.__dependency_tree_source_to_target_map = dict()
self.__dependency_tree_target_to_source_map = dict()
self.__computation_changed_listeners = dict()
self.__computation_output_changed_listeners = dict()
self.__computation_changed_delay_list = None
self.__data_item_references = dict()
self.__computation_queue_lock = threading.RLock()
self.__computation_pending_queue = list() # type: typing.List[ComputationQueueItem]
self.__computation_active_item = None # type: typing.Optional[ComputationQueueItem]
self.__data_items = list()
self.__display_items = list()
self.__data_structures = list()
self.__computations = list()
self.__connections = list()
self.__display_item_item_inserted_listeners = dict()
self.__display_item_item_removed_listeners = dict()
self.session_id = None
self.start_new_session()
self.__prune()
for data_group in self.data_groups:
data_group.connect_display_items(self.__resolve_display_item_specifier)
self.__pending_data_item_updates_lock = threading.RLock()
self.__pending_data_item_updates = list()
self.__pending_data_item_merge_lock = threading.RLock()
self.__pending_data_item_merge = None
self.__current_computation = None
self.__call_soon_queue = list()
self.__call_soon_queue_lock = threading.RLock()
self.call_soon_event = Event.Event()
self.__hardware_source_bridge = hardware_source_manager().register_document_model(self)
# the implicit connections watch for computations matching specific criteria and then set up
# connections between inputs/outputs of the computation. for instance, when the user changes
# the display interval on a line profile resulting from a pick-style operation, it can be
# linked to the slice interval on the collection of 1D data from which the pick was computed.
# in addition, the implicit connections track implicit dependencies - this is helpful so that
# when dragging the interval on the line plot, the source data is treated as under transaction
# which dramatically improves performance during dragging.
self.__implicit_dependencies = list()
self.__implicit_map_connection = ImplicitMapConnection(self)
self.__implicit_pick_connection = ImplicitPickConnection(self)
self.__implicit_line_profile_intervals_connection = ImplicitLineProfileIntervalsConnection(self)
for index, item in enumerate(self.__project.data_items):
self.__project_item_inserted("data_items", item, index)
for index, item in enumerate(self.__project.display_items):
self.__project_item_inserted("display_items", item, index)
for index, item in enumerate(self.__project.data_structures):
self.__project_item_inserted("data_structures", item, index)
for index, item in enumerate(self.__project.computations):
self.__project_item_inserted("computations", item, index)
for index, item in enumerate(self.__project.connections):
self.__project_item_inserted("connections", item, index)
for index, item in enumerate(self.__project.data_groups):
self.__project_item_inserted("data_groups", item, index)
def __resolve_display_item_specifier(self, display_item_specifier_d: typing.Dict) -> typing.Optional[DisplayItem.DisplayItem]:
display_item_specifier = Persistence.PersistentObjectSpecifier.read(display_item_specifier_d)
return typing.cast(typing.Optional[DisplayItem.DisplayItem], self.resolve_item_specifier(display_item_specifier))
def __resolve_mapped_items(self):
# handle the reference variable assignments
for mapped_item in self._project.mapped_items:
item_proxy = self._project.create_item_proxy(
item_specifier=Persistence.PersistentObjectSpecifier.read(mapped_item))
with contextlib.closing(item_proxy):
if isinstance(item_proxy.item, DisplayItem.DisplayItem):
display_item = typing.cast(Persistence.PersistentObject, item_proxy.item)
                    if display_item not in MappedItemManager().item_map.values():
MappedItemManager().register(self, item_proxy.item)
def __resolve_data_item_references(self):
# update the data item references
data_item_references = self._project.data_item_references
for key, data_item_specifier in data_item_references.items():
persistent_object_specifier = Persistence.PersistentObjectSpecifier.read(data_item_specifier)
if key in self.__data_item_references:
self.__data_item_references[key].set_data_item_specifier(self._project, persistent_object_specifier)
else:
self.__data_item_references.setdefault(key, DocumentModel.DataItemReference(self, key, self._project, persistent_object_specifier))
def __prune(self):
self._project.prune()
def close(self):
with self.__call_soon_queue_lock:
self.__call_soon_queue = list()
# notify listeners
self.about_to_close_event.fire()
# stop computations
with self.__computation_queue_lock:
self.__computation_pending_queue.clear()
if self.__computation_active_item:
self.__computation_active_item.valid = False
self.__computation_active_item = None
with self.__pending_data_item_merge_lock:
if self.__pending_data_item_merge:
self.__pending_data_item_merge.close()
self.__pending_data_item_merge = None
# r_vars
MappedItemManager().unregister_document(self)
# close implicit connections
self.__implicit_map_connection.close()
self.__implicit_map_connection = None
self.__implicit_pick_connection.close()
self.__implicit_pick_connection = None
self.__implicit_line_profile_intervals_connection.close()
self.__implicit_line_profile_intervals_connection = None
# make sure the data item references shut down cleanly
for data_item_reference in self.__data_item_references.values():
data_item_reference.close()
self.__data_item_references = None
if self.__hardware_source_bridge:
self.__hardware_source_bridge.close()
self.__hardware_source_bridge = None
if self.__storage_cache:
self.__storage_cache.close()
self.__storage_cache = None
self.__computation_thread_pool.close()
self.__transaction_manager.close()
self.__transaction_manager = None
for display_item in self.__display_items:
self.__display_item_item_inserted_listeners.pop(display_item).close()
self.__display_item_item_removed_listeners.pop(display_item).close()
if self.__project_item_inserted_listener:
self.__project_item_inserted_listener.close()
self.__project_item_inserted_listener = None
if self.__project_item_removed_listener:
self.__project_item_removed_listener.close()
self.__project_item_removed_listener = None
if self.__project_property_changed_listener:
self.__project_property_changed_listener.close()
self.__project_property_changed_listener = None
for computation in self.__computations:
computation_changed_listener = self.__computation_changed_listeners.pop(computation, None)
if computation_changed_listener: computation_changed_listener.close()
computation_output_changed_listener = self.__computation_output_changed_listeners.pop(computation, None)
if computation_output_changed_listener: computation_output_changed_listener.close()
self.__project.persistent_object_context = None
self.__project.close()
self.__project = None
self.__class__.count -= 1
def __call_soon(self, fn):
# add the function to the queue of items to call on the main thread.
# use a queue here in case it is called before the listener is configured,
        # as is the case while the document loads.
with self.__call_soon_queue_lock:
self.__call_soon_queue.append(fn)
self.call_soon_event.fire_any()
def _call_soon(self, fn: typing.Callable[[], None]) -> None:
self.__call_soon(fn)
def perform_call_soon(self):
# call one function in the call soon queue
fn = None
with self.__call_soon_queue_lock:
if self.__call_soon_queue:
fn = self.__call_soon_queue.pop(0)
if fn:
fn()
def perform_all_call_soon(self):
# call all functions in the call soon queue
with self.__call_soon_queue_lock:
call_soon_queue = self.__call_soon_queue
self.__call_soon_queue = list()
for fn in call_soon_queue:
fn()
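    # A sketch of the call-soon pattern described above (listener wiring is an
    # assumption; in the application a controller listens to call_soon_event and
    # dispatches to the UI thread):
    #
    #     document_model._call_soon(lambda: print("runs on the main thread"))
    #     ...
    #     document_model.perform_all_call_soon()  # main thread drains the queue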
def about_to_delete(self):
# override from ReferenceCounted. several DocumentControllers may retain references
self.close()
def __project_item_inserted(self, name: str, item, before_index: int) -> None:
if name == "data_items":
self.__handle_data_item_inserted(item)
elif name == "display_items":
self.__handle_display_item_inserted(item)
elif name == "data_structures":
self.__handle_data_structure_inserted(item)
elif name == "computations":
self.__handle_computation_inserted(item)
elif name == "connections":
self.__handle_connection_inserted(item)
elif name == "data_groups":
self.notify_insert_item("data_groups", item, before_index)
item.connect_display_items(self.__resolve_display_item_specifier)
def __project_item_removed(self, name: str, item, index: int) -> None:
if name == "data_items":
self.__handle_data_item_removed(item)
elif name == "display_items":
self.__handle_display_item_removed(item)
elif name == "data_structures":
self.__handle_data_structure_removed(item)
elif name == "computations":
self.__handle_computation_removed(item)
elif name == "connections":
self.__handle_connection_removed(item)
elif name == "data_groups":
item.disconnect_display_items()
self.notify_remove_item("data_groups", item, index)
def __project_property_changed(self, name: str) -> None:
if name == "data_item_references":
self.__resolve_data_item_references()
if name == "mapped_items":
self.__resolve_mapped_items()
def create_item_proxy(self, *, item_uuid: uuid.UUID = None, item_specifier: Persistence.PersistentObjectSpecifier = None, item: Persistence.PersistentObject = None) -> Persistence.PersistentObjectProxy:
# returns item proxy in projects. used in data group hierarchy.
return self._project.create_item_proxy(item_uuid=item_uuid, item_specifier=item_specifier, item=item)
def resolve_item_specifier(self, item_specifier: Persistence.PersistentObjectSpecifier) -> Persistence.PersistentObject:
return self._project.resolve_item_specifier(item_specifier)
@property
def modified_state(self) -> int:
return self._project.modified_state
@modified_state.setter
def modified_state(self, value: int) -> None:
self._project.modified_state = value
@property
def data_items(self) -> typing.List[DataItem.DataItem]:
return self.__data_items
@property
def display_items(self) -> typing.List[DisplayItem.DisplayItem]:
return self.__display_items
@property
def data_structures(self) -> typing.List[DataStructure.DataStructure]:
return self.__data_structures
@property
def computations(self) -> typing.List[Symbolic.Computation]:
return self.__computations
@property
def connections(self) -> typing.List[Connection.Connection]:
return self.__connections
@property
def _project(self) -> Project.Project:
return self.__project
@property
def implicit_dependencies(self):
return self.__implicit_dependencies
def register_implicit_dependency(self, implicit_dependency: AbstractImplicitDependency):
self.__implicit_dependencies.append(implicit_dependency)
def unregister_implicit_dependency(self, implicit_dependency: AbstractImplicitDependency):
self.__implicit_dependencies.remove(implicit_dependency)
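    # A sketch of registering an implicit dependency (hypothetical items; see the
    # comment in __init__ about implicit connections). While registered, items
    # returned by get_dependents are pulled into the deep transaction item set
    # built by the TransactionManager above.
    #
    #     dependency = ImplicitDependency([interval_graphic], pick_data_item)
    #     document_model.register_implicit_dependency(dependency)
    #     ...
    #     document_model.unregister_implicit_dependency(dependency)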
def start_new_session(self):
self.session_id = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
@property
def current_session_id(self):
return self.session_id
def copy_data_item(self, data_item: DataItem.DataItem) -> DataItem.DataItem:
computation_copy = copy.deepcopy(self.get_data_item_computation(data_item))
data_item_copy = copy.deepcopy(data_item)
self.append_data_item(data_item_copy)
if computation_copy:
computation_copy.source = None
computation_copy._clear_referenced_object("target")
self.set_data_item_computation(data_item_copy, computation_copy)
return data_item_copy
def __handle_data_item_inserted(self, data_item: DataItem.DataItem) -> None:
assert data_item is not None
assert data_item not in self.data_items
# data item bookkeeping
data_item.set_storage_cache(self.storage_cache)
# insert in internal list
before_index = len(self.__data_items)
self.__data_items.append(data_item)
data_item._document_model = self
data_item.set_session_manager(self)
self.notify_insert_item("data_items", data_item, before_index)
self.__transaction_manager._add_item(data_item)
def __handle_data_item_removed(self, data_item: DataItem.DataItem) -> None:
self.__transaction_manager._remove_item(data_item)
library_computation = self.get_data_item_computation(data_item)
with self.__computation_queue_lock:
computation_pending_queue = self.__computation_pending_queue
self.__computation_pending_queue = list()
for computation_queue_item in computation_pending_queue:
                if computation_queue_item.computation is not library_computation:
self.__computation_pending_queue.append(computation_queue_item)
if self.__computation_active_item and library_computation is self.__computation_active_item.computation:
self.__computation_active_item.valid = False
# remove data item from any selections
self.data_item_will_be_removed_event.fire(data_item)
# remove it from the persistent_storage
data_item._document_model = None
assert data_item is not None
assert data_item in self.data_items
index = self.data_items.index(data_item)
self.__data_items.remove(data_item)
self.notify_remove_item("data_items", data_item, index)
def append_data_item(self, data_item: DataItem.DataItem, auto_display: bool = True) -> None:
data_item.session_id = self.session_id
self._project.append_data_item(data_item)
# automatically add a display
if auto_display:
display_item = DisplayItem.DisplayItem(data_item=data_item)
self.append_display_item(display_item)
def insert_data_item(self, index: int, data_item: DataItem.DataItem, auto_display: bool = True) -> None:
uuid_order = save_item_order(self.__data_items)
self.append_data_item(data_item, auto_display=auto_display)
insert_item_order(uuid_order, index, data_item)
self.__data_items = restore_item_order(self._project, uuid_order)
def remove_data_item(self, data_item: DataItem.DataItem, *, safe: bool=False) -> None:
self.__cascade_delete(data_item, safe=safe).close()
def remove_data_item_with_log(self, data_item: DataItem.DataItem, *, safe: bool=False) -> Changes.UndeleteLog:
return self.__cascade_delete(data_item, safe=safe)
def restore_data_item(self, data_item_uuid: uuid.UUID, before_index: int=None) -> typing.Optional[DataItem.DataItem]:
return self._project.restore_data_item(data_item_uuid)
def restore_items_order(self, name: str, order: typing.List[typing.Tuple[Project.Project, Persistence.PersistentObject]]) -> None:
if name == "data_items":
self.__data_items = restore_item_order(self._project, order)
elif name == "display_items":
self.__display_items = restore_item_order(self._project, order)
        elif name == "data_structures":
self.__data_structures = restore_item_order(self._project, order)
elif name == "computations":
self.__computations = restore_item_order(self._project, order)
elif name == "connections":
self.__connections = restore_item_order(self._project, order)
def deepcopy_display_item(self, display_item: DisplayItem.DisplayItem) -> DisplayItem.DisplayItem:
display_item_copy = copy.deepcopy(display_item)
data_item_copies = list()
for data_item in display_item.data_items:
if data_item:
data_item_copy = copy.deepcopy(data_item)
self.append_data_item(data_item_copy, False)
data_item_copies.append(data_item_copy)
else:
data_item_copies.append(None)
for display_data_channel in copy.copy(display_item_copy.display_data_channels):
display_item_copy.remove_display_data_channel(display_data_channel).close()
for data_item_copy, display_data_channel in zip(data_item_copies, display_item.display_data_channels):
display_data_channel_copy = DisplayItem.DisplayDataChannel(data_item=data_item_copy)
display_data_channel_copy.copy_display_data_properties_from(display_data_channel)
display_item_copy.append_display_data_channel(display_data_channel_copy)
for display_layer in copy.copy(display_item_copy.display_layers):
display_item_copy.remove_display_layer(display_layer).close()
for i in range(len(display_item.display_layers)):
data_index = display_item.display_data_channels.index(display_item.get_display_layer_display_data_channel(i))
display_item_copy.add_display_layer_for_display_data_channel(display_item_copy.display_data_channels[data_index], **display_item.get_display_layer_properties(i))
self.append_display_item(display_item_copy)
return display_item_copy
def append_display_item(self, display_item: DisplayItem.DisplayItem, *, update_session: bool = True) -> None:
if update_session:
display_item.session_id = self.session_id
self._project.append_display_item(display_item)
def insert_display_item(self, before_index: int, display_item: DisplayItem.DisplayItem, *, update_session: bool = True) -> None:
uuid_order = save_item_order(self.__display_items)
self.append_display_item(display_item, update_session=update_session)
insert_item_order(uuid_order, before_index, display_item)
self.__display_items = restore_item_order(self._project, uuid_order)
def remove_display_item(self, display_item) -> None:
self.__cascade_delete(display_item).close()
def remove_display_item_with_log(self, display_item) -> Changes.UndeleteLog:
return self.__cascade_delete(display_item)
def __handle_display_item_inserted(self, display_item: DisplayItem.DisplayItem) -> None:
assert display_item is not None
assert display_item not in self.__display_items
# data item bookkeeping
display_item.set_storage_cache(self.storage_cache)
# insert in internal list
before_index = len(self.__display_items)
self.__display_items.append(display_item)
def item_changed(display_item: DisplayItem.DisplayItem, name: str, value, index: int) -> None:
# pass display item because display data channel might be being removed in which case it will have no container.
if name == "display_data_channels":
# handle cases where a display data channel is added or removed.
# update the related items. this is a blunt approach - they may not
# have changed, but a display update is relatively cheap.
assert display_item
source_display_items = self.get_source_display_items(display_item) if display_item else list()
dependent_display_items = self.get_dependent_display_items(display_item) if display_item else list()
self.related_items_changed.fire(display_item, source_display_items, dependent_display_items)
self.__display_item_item_inserted_listeners[display_item] = display_item.item_inserted_event.listen(functools.partial(item_changed, display_item))
self.__display_item_item_removed_listeners[display_item] = display_item.item_removed_event.listen(functools.partial(item_changed, display_item))
# send notifications
self.notify_insert_item("display_items", display_item, before_index)
def __handle_display_item_removed(self, display_item: DisplayItem.DisplayItem) -> None:
# remove it from the persistent_storage
assert display_item is not None
assert display_item in self.__display_items
index = self.__display_items.index(display_item)
self.notify_remove_item("display_items", display_item, index)
self.__display_items.remove(display_item)
self.__display_item_item_inserted_listeners.pop(display_item).close()
self.__display_item_item_removed_listeners.pop(display_item).close()
def __start_project_read(self) -> None:
pass
def __finish_project_read(self) -> None:
self.__hardware_source_bridge.clean_display_items()
def insert_model_item(self, container, name, before_index, item):
container.insert_item(name, before_index, item)
def remove_model_item(self, container, name, item, *, safe: bool=False) -> Changes.UndeleteLog:
return self.__cascade_delete(item, safe=safe)
def assign_variable_to_display_item(self, display_item: DisplayItem.DisplayItem) -> str:
r_var = MappedItemManager().get_item_r_var(display_item)
if not r_var:
r_var = MappedItemManager().register(self, display_item)
mapped_items = self._project.mapped_items
mapped_items.append(display_item.project.create_specifier(display_item).write())
self._project.mapped_items = mapped_items
return r_var
def __build_cascade(self, item, items: list, dependencies: list) -> None:
        # build a list of items to delete using item as the base. put the leaves at the end of the list.
# store associated dependencies in the form source -> target into dependencies.
# print(f"build {item}")
if item not in items:
# first handle the case where a data item that is the only target of a graphic cascades to the graphic.
# this is the only case where a target causes a source to be deleted.
items.append(item)
sources = self.__dependency_tree_target_to_source_map.get(weakref.ref(item), list())
if isinstance(item, DataItem.DataItem):
for source in sources:
if isinstance(source, Graphics.Graphic):
source_targets = self.__dependency_tree_source_to_target_map.get(weakref.ref(source), list())
if len(source_targets) == 1 and source_targets[0] == item:
self.__build_cascade(source, items, dependencies)
# delete display items whose only data item is being deleted
for display_item in self.get_display_items_for_data_item(item):
display_item_alive = False
for display_data_channel in display_item.display_data_channels:
if display_data_channel.data_item == item:
self.__build_cascade(display_data_channel, items, dependencies)
                        elif display_data_channel.data_item not in items:
display_item_alive = True
if not display_item_alive:
self.__build_cascade(display_item, items, dependencies)
elif isinstance(item, DisplayItem.DisplayItem):
# graphics on a display item are deleted.
for graphic in item.graphics:
self.__build_cascade(graphic, items, dependencies)
# display data channels are deleted.
for display_data_channel in item.display_data_channels:
self.__build_cascade(display_data_channel, items, dependencies)
# delete data items whose only display item is being deleted
for data_item in item.data_items:
if data_item and len(self.get_display_items_for_data_item(data_item)) == 1:
self.__build_cascade(data_item, items, dependencies)
elif isinstance(item, DisplayItem.DisplayDataChannel):
# delete data items whose only display item channel is being deleted
display_item = typing.cast(DisplayItem.DisplayItem, item.container)
data_item = typing.cast(typing.Optional[DataItem.DataItem], item.data_item)
if data_item and len(self.get_display_items_for_data_item(data_item)) == 1:
display_data_channels_referring_to_data_item = 0
# only delete data item if it is used by only the one display data channel being deleted
for display_data_channel in display_item.display_data_channels:
if display_data_channel.data_item == data_item:
display_data_channels_referring_to_data_item += 1
if display_data_channels_referring_to_data_item == 1:
self.__build_cascade(data_item, items, dependencies)
for display_layer in display_item.display_layers:
if display_layer.display_data_channel == item:
self.__build_cascade(display_layer, items, dependencies)
elif isinstance(item, DisplayItem.DisplayLayer):
# delete display data channels whose only referencing display layer is being deleted
display_layer = typing.cast(DisplayItem.DisplayLayer, item)
display_data_channel = display_layer.display_data_channel
display_item = typing.cast(DisplayItem.DisplayItem, item.container)
reference_count = display_item.get_display_data_channel_layer_use_count(display_layer.display_data_channel)
if reference_count == 1:
self.__build_cascade(display_data_channel, items, dependencies)
# outputs of a computation are deleted.
elif isinstance(item, Symbolic.Computation):
for output in item._outputs:
self.__build_cascade(output, items, dependencies)
# dependencies are deleted
# in order to be able to have finer control over how dependencies of input lists are handled,
# enumerate the computations and match up dependencies instead of using the dependency tree.
# this could obviously be optimized.
if not isinstance(item, Symbolic.Computation):
for computation in self.computations:
base_objects = set(computation.direct_input_items)
if item in base_objects:
targets = computation._outputs
for target in targets:
if (item, target) not in dependencies:
dependencies.append((item, target))
self.__build_cascade(target, items, dependencies)
# dependencies are deleted
# see note above
# targets = self.__dependency_tree_source_to_target_map.get(weakref.ref(item), list())
# for target in targets:
# if (item, target) not in dependencies:
# dependencies.append((item, target))
# self.__build_cascade(target, items, dependencies)
# data items whose source is the item are deleted
for data_item in self.data_items:
if data_item.source == item:
if (item, data_item) not in dependencies:
dependencies.append((item, data_item))
self.__build_cascade(data_item, items, dependencies)
# display items whose source is the item are deleted
for display_item in self.display_items:
pass
# if display_item.source == item:
# if (item, display_item) not in dependencies:
# dependencies.append((item, display_item))
# self.__build_cascade(display_item, items, dependencies)
# graphics whose source is the item are deleted
for display_item in self.display_items:
for graphic in display_item.graphics:
if graphic.source == item:
if (item, graphic) not in dependencies:
dependencies.append((item, graphic))
self.__build_cascade(graphic, items, dependencies)
# connections whose source is the item are deleted
for connection in self.connections:
if connection.parent == item:
if (item, connection) not in dependencies:
dependencies.append((item, connection))
self.__build_cascade(connection, items, dependencies)
# data structures whose source is the item are deleted
for data_structure in self.data_structures:
if data_structure.source == item:
if (item, data_structure) not in dependencies:
dependencies.append((item, data_structure))
self.__build_cascade(data_structure, items, dependencies)
# computations whose source is the item are deleted
for computation in self.computations:
if computation.source == item or not computation.is_valid_with_removals(set(items)):
if (item, computation) not in dependencies:
dependencies.append((item, computation))
self.__build_cascade(computation, items, dependencies)
# item is being removed; so remove any dependency from any source to this item
for source in sources:
if (source, item) not in dependencies:
dependencies.append((source, item))
def __cascade_delete(self, master_item, safe: bool=False) -> Changes.UndeleteLog:
with self.transaction_context():
return self.__cascade_delete_inner(master_item, safe=safe)
def __cascade_delete_inner(self, master_item, safe: bool=False) -> Changes.UndeleteLog:
"""Cascade delete an item.
Returns an undelete log that can be used to undo the cascade deletion.
Builds a cascade of items to be deleted and dependencies to be removed when the passed item is deleted. Then
removes computations that are no longer valid. Removing a computation may result in more deletions, so the
process is repeated until nothing more gets removed.
Next remove dependencies.
Next remove individual items (from the most distant from the root item to the root item).
"""
# print(f"cascade {master_item}")
# this horrible little hack ensures that computation changed messages are delayed until the end of the cascade
# delete; otherwise there are cases where dependencies can be reestablished during the changed messages while
# this method is partially finished. ugh. see test_computation_deletes_when_source_cycle_deletes.
if self.__computation_changed_delay_list is None:
computation_changed_delay_list = list()
self.__computation_changed_delay_list = computation_changed_delay_list
else:
computation_changed_delay_list = None
undelete_log = Changes.UndeleteLog()
try:
items = list()
dependencies = list()
self.__build_cascade(master_item, items, dependencies)
cascaded = True
while cascaded:
cascaded = False
# adjust computation bookkeeping to remove deleted items, then delete unused computations
items_set = set(items)
for computation in copy.copy(self.computations):
input_deleted = not items_set.isdisjoint(computation.direct_input_items)
output_deleted = not items_set.isdisjoint(computation.output_items)
computation._inputs -= items_set
computation._outputs -= items_set
if computation not in items and computation != self.__current_computation:
# computations are auto deleted if any input or output is deleted.
if output_deleted or not computation._inputs or input_deleted:
self.__build_cascade(computation, items, dependencies)
cascaded = True
# print(list(reversed(items)))
# print(list(reversed(dependencies)))
for source, target in reversed(dependencies):
self.__remove_dependency(source, target)
# now delete the actual items
for item in reversed(items):
for computation in self.computations:
t = computation.list_item_removed(item)
if t is not None:
index, variable_index, object_specifier = t
undelete_log.append(UndeleteObjectSpecifier(self, computation, index, variable_index, object_specifier))
for item in reversed(items):
container = item.container
# if container is None, then this object has already been removed
if isinstance(container, Project.Project) and isinstance(item, DataItem.DataItem):
undelete_log.append(UndeleteDataItem(self, item))
# call the version of remove_data_item that doesn't cascade again
# NOTE: remove_data_item will notify_remove_item
container.remove_data_item(item)
elif isinstance(container, Project.Project) and isinstance(item, DisplayItem.DisplayItem):
# remove the data item from any groups
for data_group in self.get_flat_data_group_generator():
if item in data_group.display_items:
undelete_log.append(UndeleteDisplayItemInDataGroup(self, item, data_group))
data_group.remove_display_item(item)
undelete_log.append(UndeleteDisplayItem(self, item))
# call the version of remove_display_item that doesn't cascade again
# NOTE: remove_display_item will notify_remove_item
container.remove_display_item(item)
elif isinstance(container, Project.Project) and isinstance(item, DataStructure.DataStructure):
undelete_log.append(UndeleteItem(DataStructuresController(self), item))
container.remove_item("data_structures", item)
elif isinstance(container, Project.Project) and isinstance(item, Symbolic.Computation):
undelete_log.append(UndeleteItem(ComputationsController(self), item))
container.remove_item("computations", item)
if item in self.__computation_changed_delay_list:
self.__computation_changed_delay_list.remove(item)
elif isinstance(container, Project.Project) and isinstance(item, Connection.Connection):
undelete_log.append(UndeleteItem(ConnectionsController(self), item))
container.remove_item("connections", item)
elif container and isinstance(item, Graphics.Graphic):
undelete_log.append(UndeleteItem(GraphicsController(self), item))
container.remove_item("graphics", item)
elif container and isinstance(item, DisplayItem.DisplayDataChannel):
undelete_log.append(UndeleteItem(DisplayDataChannelsController(self), item))
container.remove_item("display_data_channels", item)
elif container and isinstance(item, DisplayItem.DisplayLayer):
undelete_log.append(UndeleteItem(DisplayLayersController(self), item))
container.remove_item("display_layers", item)
        except Exception:
            # log the traceback for debugging, then re-raise so callers can handle or report the error
            import traceback
            traceback.print_exc()
            raise
finally:
# check whether this call of __cascade_delete is the top level one that will finish the computation
# changed messages.
if computation_changed_delay_list is not None:
self.__finish_computation_changed()
return undelete_log
def undelete_all(self, undelete_log: Changes.UndeleteLog) -> None:
undelete_log.undelete_all(self)
def __remove_dependency(self, source_item, target_item):
# print(f"remove dependency {source_item} {target_item}")
with self.__dependency_tree_lock:
target_items = self.__dependency_tree_source_to_target_map.setdefault(weakref.ref(source_item), list())
if target_item in target_items:
target_items.remove(target_item)
if not target_items:
self.__dependency_tree_source_to_target_map.pop(weakref.ref(source_item), None)
source_items = self.__dependency_tree_target_to_source_map.setdefault(weakref.ref(target_item), list())
if source_item in source_items:
source_items.remove(source_item)
if not source_items:
self.__dependency_tree_target_to_source_map.pop(weakref.ref(target_item), None)
if isinstance(source_item, DataItem.DataItem) and isinstance(target_item, DataItem.DataItem):
# propagate live states to dependents
if source_item.is_live:
self.end_data_item_live(target_item)
self.dependency_removed_event.fire(source_item, target_item)
# fire the display messages
if isinstance(source_item, DataItem.DataItem):
for display_item in self.get_display_items_for_data_item(source_item):
source_display_items = self.get_source_display_items(display_item) if display_item else list()
dependent_display_items = self.get_dependent_display_items(display_item) if display_item else list()
self.related_items_changed.fire(display_item, source_display_items, dependent_display_items)
def __add_dependency(self, source_item, target_item):
# print(f"add dependency {source_item} {target_item}")
with self.__dependency_tree_lock:
self.__dependency_tree_source_to_target_map.setdefault(weakref.ref(source_item), list()).append(target_item)
self.__dependency_tree_target_to_source_map.setdefault(weakref.ref(target_item), list()).append(source_item)
if isinstance(source_item, DataItem.DataItem) and isinstance(target_item, DataItem.DataItem):
# propagate live states to dependents
if source_item.is_live:
self.begin_data_item_live(target_item)
self.dependency_added_event.fire(source_item, target_item)
# fire the display messages
if isinstance(source_item, DataItem.DataItem):
for display_item in self.get_display_items_for_data_item(source_item):
source_display_items = self.get_source_display_items(display_item) if display_item else list()
dependent_display_items = self.get_dependent_display_items(display_item) if display_item else list()
self.related_items_changed.fire(display_item, source_display_items, dependent_display_items)
def __computation_needs_update(self, computation: Symbolic.Computation) -> None:
# When the computation for a data item is set or mutated, this function will be called.
# This function looks through the existing pending computation queue, and if this data
# item is not already in the queue, it adds it and ensures the dispatch thread eventually
# executes the computation.
with self.__computation_queue_lock:
for computation_queue_item in self.__computation_pending_queue:
if computation and computation_queue_item.computation == computation:
return
computation_queue_item = ComputationQueueItem(computation=computation)
self.__computation_pending_queue.append(computation_queue_item)
self.dispatch_task(self.__recompute)
def __establish_computation_dependencies(self, old_inputs: typing.Set, new_inputs: typing.Set, old_outputs: typing.Set, new_outputs: typing.Set) -> None:
# establish dependencies between input and output items.
with self.__dependency_tree_lock:
removed_inputs = old_inputs - new_inputs
added_inputs = new_inputs - old_inputs
removed_outputs = old_outputs - new_outputs
added_outputs = new_outputs - old_outputs
same_inputs = old_inputs.intersection(new_inputs)
# a, b -> x, y => a, c => x, z
# [a -> x, a -> y, b -> x, b -> y]
# [a -> x, a -> z, c -> x, c -> z]
# old_inputs = [a, b]
# new_inputs = [a, c]
# removed inputs = [b]
# added_inputs = [c]
# old_outputs = [x, y]
# new_outputs = [x, z]
# removed_outputs = [y]
# added_outputs = [z]
# for each removed input, remove dependency to old outputs: [a -> x, a -> y]
# for each removed output, remove dependency from old inputs to that output: [a -> x]
# for each added input, add dependency to new outputs: [a -> x, c -> x, c -> z]
# for each added output, add dependency from unchanged inputs to that output: [a -> x, a -> z, c -> x, c -> z]
for input in removed_inputs:
for output in old_outputs:
self.__remove_dependency(input, output)
for output in removed_outputs:
for input in old_inputs:
self.__remove_dependency(input, output)
for input in added_inputs:
for output in new_outputs:
self.__add_dependency(input, output)
for output in added_outputs:
for input in same_inputs:
self.__add_dependency(input, output)
if removed_inputs or added_inputs or removed_outputs or added_outputs:
self.__transaction_manager._rebuild_transactions()
# live state, and dependencies
def get_source_items(self, item) -> typing.List:
with self.__dependency_tree_lock:
return copy.copy(self.__dependency_tree_target_to_source_map.get(weakref.ref(item), list()))
def get_dependent_items(self, item) -> typing.List:
"""Return the list of data items containing data that directly depends on data in this item."""
with self.__dependency_tree_lock:
return copy.copy(self.__dependency_tree_source_to_target_map.get(weakref.ref(item), list()))
    def __get_deep_dependent_item_set(self, item, item_set) -> None:
        """Add the item and all items that deeply depend on it to item_set."""
        if item not in item_set:
item_set.add(item)
with self.__dependency_tree_lock:
for dependent in self.get_dependent_items(item):
self.__get_deep_dependent_item_set(dependent, item_set)
def get_source_data_items(self, data_item: DataItem.DataItem) -> typing.List[DataItem.DataItem]:
with self.__dependency_tree_lock:
return [data_item for data_item in self.__dependency_tree_target_to_source_map.get(weakref.ref(data_item), list()) if isinstance(data_item, DataItem.DataItem)]
def get_dependent_data_items(self, data_item: DataItem.DataItem) -> typing.List[DataItem.DataItem]:
"""Return the list of data items containing data that directly depends on data in this item."""
with self.__dependency_tree_lock:
return [data_item for data_item in self.__dependency_tree_source_to_target_map.get(weakref.ref(data_item), list()) if isinstance(data_item, DataItem.DataItem)]
def get_source_display_items(self, display_item: DisplayItem.DisplayItem) -> typing.List[DisplayItem.DisplayItem]:
display_items = list()
for data_item in display_item.data_items:
if data_item: # may be none for missing data
for data_item_ in self.get_source_data_items(data_item):
for display_item_ in self.get_display_items_for_data_item(data_item_):
if display_item_ not in display_items and display_item_ != display_item:
display_items.append(display_item_)
return display_items
def get_dependent_display_items(self, display_item: DisplayItem.DisplayItem) -> typing.List[DisplayItem.DisplayItem]:
display_items = list()
for data_item in display_item.data_items:
if data_item: # may be none for missing data
for data_item_ in self.get_dependent_data_items(data_item):
for display_item_ in self.get_display_items_for_data_item(data_item_):
if display_item_ not in display_items and display_item_ != display_item:
display_items.append(display_item_)
return display_items
def transaction_context(self):
"""Return a context object for a document-wide transaction."""
class Transaction:
def __init__(self, document_model: DocumentModel):
self.__document_model = document_model
def __enter__(self):
self.__document_model._project.project_storage_system.enter_transaction()
return self
def __exit__(self, type, value, traceback):
self.__document_model._project.project_storage_system.exit_transaction()
return Transaction(self)
def item_transaction(self, item) -> Transaction:
return self.__transaction_manager.item_transaction(item)
def is_in_transaction_state(self, item) -> bool:
return self.__transaction_manager.is_in_transaction_state(item)
@property
def transaction_count(self):
return self.__transaction_manager.transaction_count
def begin_display_item_transaction(self, display_item: DisplayItem.DisplayItem) -> Transaction:
if display_item:
return self.item_transaction(display_item)
else:
return self.__transaction_manager.item_transaction(set())
def data_item_live(self, data_item):
""" Return a context manager to put the data item in a 'live state'. """
class LiveContextManager:
def __init__(self, manager, object):
self.__manager = manager
self.__object = object
def __enter__(self):
self.__manager.begin_data_item_live(self.__object)
return self
def __exit__(self, type, value, traceback):
self.__manager.end_data_item_live(self.__object)
return LiveContextManager(self, data_item)
def begin_data_item_live(self, data_item):
"""Begins a live state for the data item.
The live state is propagated to dependent data items.
This method is thread safe. See slow_test_dependent_data_item_removed_while_live_data_item_becomes_unlive.
"""
with self.__live_data_items_lock:
old_live_count = self.__live_data_items.get(data_item.uuid, 0)
self.__live_data_items[data_item.uuid] = old_live_count + 1
if old_live_count == 0:
data_item._enter_live_state()
for dependent_data_item in self.get_dependent_data_items(data_item):
self.begin_data_item_live(dependent_data_item)
def end_data_item_live(self, data_item):
"""Ends a live state for the data item.
The live-ness property is propagated to dependent data items, similar to the transactions.
This method is thread safe.
"""
with self.__live_data_items_lock:
live_count = self.__live_data_items.get(data_item.uuid, 0) - 1
assert live_count >= 0
self.__live_data_items[data_item.uuid] = live_count
if live_count == 0:
data_item._exit_live_state()
for dependent_data_item in self.get_dependent_data_items(data_item):
self.end_data_item_live(dependent_data_item)
# data groups
def append_data_group(self, data_group):
self.insert_data_group(len(self.data_groups), data_group)
def insert_data_group(self, before_index, data_group):
self._project.insert_item("data_groups", before_index, data_group)
def remove_data_group(self, data_group):
self._project.remove_item("data_groups", data_group)
def create_default_data_groups(self):
# ensure there is at least one group
if len(self.data_groups) < 1:
data_group = DataGroup.DataGroup()
data_group.title = _("My Data")
self.append_data_group(data_group)
# Return a generator over all data groups
def get_flat_data_group_generator(self):
return DataGroup.get_flat_data_group_generator_in_container(self)
def get_data_group_by_uuid(self, uuid: uuid.UUID):
for data_group in DataGroup.get_flat_data_group_generator_in_container(self):
if data_group.uuid == uuid:
return data_group
return None
def get_display_items_for_data_item(self, data_item: typing.Optional[DataItem.DataItem]) -> typing.Set[DisplayItem.DisplayItem]:
# return the set of display items for the data item
display_items = set()
if data_item:
for display_data_channel in data_item.display_data_channels:
display_items.add(display_data_channel.container)
return display_items
def get_any_display_item_for_data_item(self, data_item: typing.Optional[DataItem.DataItem]) -> typing.Optional[DisplayItem.DisplayItem]:
# return the first display item containing the data item.
# ordering is preserved (useful, at least for tests).
for display_item in self.display_items:
if data_item in display_item.data_items:
return display_item
return None
def get_display_item_for_data_item(self, data_item: DataItem.DataItem) -> typing.Optional[DisplayItem.DisplayItem]:
display_items = self.get_display_items_for_data_item(data_item)
return next(iter(display_items)) if len(display_items) == 1 else None
def get_best_display_item_for_data_item(self, data_item: DataItem.DataItem) -> typing.Optional[DisplayItem.DisplayItem]:
display_items = self.get_display_items_for_data_item(data_item)
for display_item in display_items:
if display_item.data_item == data_item:
return display_item
return next(iter(display_items)) if len(display_items) == 1 else None
def get_or_create_data_group(self, group_name):
data_group = DataGroup.get_data_group_in_container_by_title(self, group_name)
if data_group is None:
# we create a new group
data_group = DataGroup.DataGroup()
data_group.title = group_name
self.insert_data_group(0, data_group)
return data_group
    def create_computation(self, expression: typing.Optional[str]=None) -> Symbolic.Computation:
return Symbolic.Computation(expression)
def dispatch_task(self, task, description=None):
self.__computation_thread_pool.queue_fn(task, description)
def recompute_all(self, merge=True):
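        # run queued computations (and, when merge is True, their data item merges) repeatedly until the
        # pending queue, the active item, and any pending merge are all clear; when merge is False a
        # single pass over the computation thread pool is performed.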
while True:
self.__computation_thread_pool.run_all()
if merge:
self.perform_data_item_merge()
with self.__computation_queue_lock:
if not (self.__computation_pending_queue or self.__computation_active_item or self.__pending_data_item_merge):
break
else:
break
def recompute_one(self, merge=True):
self.__computation_thread_pool.run_one()
if merge:
self.perform_data_item_merge()
def start_dispatcher(self):
self.__computation_thread_pool.start(1)
def __recompute(self):
while True:
computation_queue_item = None
with self.__computation_queue_lock:
if not self.__computation_active_item and self.__computation_pending_queue:
computation_queue_item = self.__computation_pending_queue.pop(0)
self.__computation_active_item = computation_queue_item
if computation_queue_item:
# an item was put into the active queue, so compute it, then merge
pending_data_item_merge = computation_queue_item.recompute()
if pending_data_item_merge is not None:
with self.__pending_data_item_merge_lock:
if self.__pending_data_item_merge:
self.__pending_data_item_merge.close()
self.__pending_data_item_merge = pending_data_item_merge
self.__call_soon(self.perform_data_item_merge)
else:
with self.__computation_queue_lock:
self.__computation_active_item = None
else:
break
def perform_data_item_merge(self):
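        # take ownership of any pending merge under the lock, execute it while recording its computation
        # as the "current computation" (so a cascade delete triggered during the merge will not delete
        # that computation), then clear the active item, mark the computation complete and schedule
        # further recomputes.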
with self.__pending_data_item_merge_lock:
pending_data_item_merge = self.__pending_data_item_merge
self.__pending_data_item_merge = None
if pending_data_item_merge:
computation = pending_data_item_merge.computation
self.__current_computation = computation
try:
pending_data_item_merge.exec()
finally:
self.__current_computation = None
with self.__computation_queue_lock:
self.__computation_active_item = None
computation.is_initial_computation_complete.set()
pending_data_item_merge.close()
self.dispatch_task(self.__recompute)
    async def compute_immediate(self, event_loop: asyncio.AbstractEventLoop, computation: Symbolic.Computation, timeout: typing.Optional[float]=None) -> None:
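        # wait, on an executor thread so the event loop is not blocked, until the computation's initial
        # pass completes or the optional timeout elapses.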
if computation:
def sync_recompute():
computation.is_initial_computation_complete.wait(timeout)
await event_loop.run_in_executor(None, sync_recompute)
def get_object_specifier(self, object, object_type: str=None) -> typing.Optional[typing.Dict]:
return DataStructure.get_object_specifier(object, object_type)
def get_graphic_by_uuid(self, object_uuid: uuid.UUID) -> typing.Optional[Graphics.Graphic]:
for display_item in self.display_items:
for graphic in display_item.graphics:
if graphic.uuid == object_uuid:
return graphic
return None
class DataItemReference:
"""A data item reference to coordinate data item access between acquisition and main thread.
Call start/stop a matching number of times to start/stop using the data reference (from the
acquisition thread).
Set data_item property when it is created (from the UI thread).
This class will also track when the data item is deleted and handle it appropriately if it
happens while the acquisition thread is using it.
"""
def __init__(self, document_model: "DocumentModel", key: str, project: Project.Project, data_item_specifier: Persistence.PersistentObjectSpecifier=None):
self.__document_model = document_model
self.__key = key
self.__data_item_proxy = project.create_item_proxy(item_specifier=data_item_specifier)
self.__starts = 0
self.__pending_starts = 0
self.__data_item_transaction = None
self.mutex = threading.RLock()
self.data_item_reference_changed_event = Event.Event()
def item_unregistered(item) -> None:
# when this data item is removed, it can no longer be used.
# but to ensure that start/stop calls are matching in the case where this item
# is removed and then a new item is set, we need to copy the number of starts
# to the pending starts so when the new item is set, start gets called the right
# number of times to match the stops that will eventually be called.
self.__pending_starts = self.__starts
self.__starts = 0
self.__data_item_proxy.on_item_unregistered = item_unregistered
def close(self) -> None:
self.__data_item_proxy.close()
self.__data_item_proxy = None
@property
def key(self) -> str:
return self.__key
def set_data_item_specifier(self, project: Project.Project, data_item_specifier: Persistence.PersistentObjectSpecifier) -> None:
data_item_proxy = project.create_item_proxy(item_specifier=data_item_specifier)
if data_item_proxy.item != self.__data_item:
assert self.__starts == 0
assert self.__pending_starts == 0
assert not self.__data_item_transaction
self.__stop() # data item is changing; close existing one.
self.__data_item_proxy.close()
self.__data_item_proxy = data_item_proxy
else:
data_item_proxy.close()
@property
def __data_item(self) -> typing.Optional[DataItem.DataItem]:
return self.__data_item_proxy.item
def start(self):
"""Start using the data item reference. Must call stop a matching number of times.
Increments ref counts and begins transaction/live state.
Keeps track of pending starts if the data item has not yet been set.
This call is thread safe.
"""
if self.__data_item:
self.__start()
else:
self.__pending_starts += 1
def stop(self):
"""Stop using the data item reference. Must have called start a matching number of times.
Decrements ref counts and ends transaction/live state.
Keeps track of pending starts if the data item has not yet been set.
This call is thread safe.
"""
if self.__data_item:
self.__stop()
else:
self.__pending_starts -= 1
def __start(self):
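            # keep the data in memory via the ref count, then begin the transaction and live state for
            # the item; __stop undoes each of these.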
self.__data_item.increment_data_ref_count()
self.__data_item_transaction = self.__document_model.item_transaction(self.__data_item)
self.__document_model.begin_data_item_live(self.__data_item)
self.__starts += 1
def __stop(self):
# the order of these two statements is important, at least for now (12/2013)
# when the transaction ends, the data will get written to disk, so we need to
# make sure it's still in memory. if decrement were to come before the end
# of the transaction, the data would be unloaded from memory, losing it forever.
if self.__data_item_transaction:
self.__data_item_transaction.close()
self.__data_item_transaction = None
self.__document_model.end_data_item_live(self.__data_item)
self.__data_item.decrement_data_ref_count()
self.__starts -= 1
@property
def data_item(self) -> DataItem.DataItem:
with self.mutex:
return self.__data_item
@data_item.setter
def data_item(self, value):
with self.mutex:
if self.__data_item != value:
self.__data_item_proxy.item = value
# start (internal) for each pending start.
for i in range(self.__pending_starts):
self.__start()
self.__pending_starts = 0
if self.__data_item in self.__document_model.data_items:
self.data_item_reference_changed_event.fire()
else:
def item_inserted(key, value, index):
if value == self.__data_item:
self.data_item_reference_changed_event.fire()
self.__item_inserted_listener.close()
self.__item_inserted_listener = None
self.__item_inserted_listener = self.__document_model.item_inserted_event.listen(item_inserted)
@property
def display_item(self) -> DisplayItem.DisplayItem:
return self.__document_model.get_display_item_for_data_item(self.data_item)
def _queue_data_item_update(self, data_item: DataItem.DataItem, data_and_metadata: DataAndMetadata.DataAndMetadata) -> None:
# put the data update to data_item into the pending_data_item_updates list.
# the pending_data_item_updates will be serviced when the main thread calls
# perform_data_item_updates.
if data_item:
with self.__pending_data_item_updates_lock:
found = False
pending_data_item_updates = list()
for data_item_ in self.__pending_data_item_updates:
# does it match? if so and not yet found, put the new data into the matching
# slot; but then filter the rest of the matches.
if data_item_ == data_item:
if not found:
data_item.set_pending_xdata(data_and_metadata)
pending_data_item_updates.append(data_item)
found = True
else:
pending_data_item_updates.append(data_item_)
if not found: # if not added yet, add it
data_item.set_pending_xdata(data_and_metadata)
pending_data_item_updates.append(data_item)
self.__pending_data_item_updates = pending_data_item_updates
def update_data_item_partial(self, data_item: DataItem.DataItem, data_metadata: DataAndMetadata.DataMetadata,
data_and_metadata: DataAndMetadata.DataAndMetadata, src_slice: typing.Sequence[slice],
dst_slice: typing.Sequence[slice]) -> None:
if data_item:
with self.__pending_data_item_updates_lock:
assert data_metadata
data_item.queue_partial_update(data_and_metadata, src_slice=src_slice, dst_slice=dst_slice, metadata=data_metadata)
self.__pending_data_item_updates.append(data_item)
def perform_data_item_updates(self):
assert threading.current_thread() == threading.main_thread()
with self.__pending_data_item_updates_lock:
pending_data_item_updates = self.__pending_data_item_updates
self.__pending_data_item_updates = list()
for data_item in pending_data_item_updates:
data_item.update_to_pending_xdata()
# for testing
def _get_pending_data_item_updates_count(self):
return len(self.__pending_data_item_updates)
@property
def data_groups(self) -> typing.List[DataGroup.DataGroup]:
return self._project.data_groups
def _update_data_item_reference(self, key: str, data_item: DataItem.DataItem) -> None:
assert threading.current_thread() == threading.main_thread()
if data_item:
self._project.set_data_item_reference(key, data_item)
else:
self._project.clear_data_item_reference(key)
def make_data_item_reference_key(self, *components) -> str:
return "_".join([str(component) for component in list(components) if component is not None])
def get_data_item_reference(self, key) -> DocumentModel.DataItemReference:
# this is implemented this way to avoid creating a data item reference unless it is missing.
data_item_reference = self.__data_item_references.get(key)
if data_item_reference:
return data_item_reference
return self.__data_item_references.setdefault(key, DocumentModel.DataItemReference(self, key, self._project))
def setup_channel(self, data_item_reference_key: str, data_item: DataItem.DataItem) -> None:
data_item_reference = self.get_data_item_reference(data_item_reference_key)
data_item_reference.data_item = data_item
self._update_data_item_reference(data_item_reference_key, data_item)
def get_data_item_channel_reference(self, hardware_source_id: str, channel_id: str) -> DocumentModel.DataItemReference:
return self.get_data_item_reference(self.make_data_item_reference_key(hardware_source_id, channel_id))
def update_data_item_session(self, data_item: DataItem.DataItem) -> None:
data_item.update_session(self.session_id)
def populate_action_context(self, data_item: DataItem.DataItem, d: typing.MutableMapping) -> None:
if data_item.has_metadata_value("stem.hardware_source.id"):
d["hardware_source"] = hardware_source_manager().get_hardware_source_for_hardware_source_id(data_item.get_metadata_value("stem.hardware_source.id"))
def get_display_item_snapshot_new(self, display_item: DisplayItem.DisplayItem) -> DisplayItem.DisplayItem:
display_item_copy = display_item.snapshot()
data_item_copies = list()
for data_item in display_item.data_items:
if data_item:
data_item_copy = data_item.snapshot()
self.append_data_item(data_item_copy, False)
data_item_copies.append(data_item_copy)
else:
data_item_copies.append(None)
for display_data_channel in copy.copy(display_item_copy.display_data_channels):
display_item_copy.remove_display_data_channel(display_data_channel)
for data_item_copy, display_data_channel in zip(data_item_copies, display_item.display_data_channels):
display_data_channel_copy = DisplayItem.DisplayDataChannel(data_item=data_item_copy)
display_data_channel_copy.copy_display_data_properties_from(display_data_channel)
display_item_copy.append_display_data_channel(display_data_channel_copy, display_layer=DisplayItem.DisplayLayer())
# the display layers will be disrupted by appending data channels; so just recopy them here
# this code can be simplified once display layers are objects
while len(display_item_copy.display_layers):
display_item_copy.remove_display_layer(0).close()
for i in range(len(display_item.display_layers)):
data_index = display_item.display_data_channels.index(display_item.get_display_layer_display_data_channel(i))
display_item_copy.add_display_layer_for_display_data_channel(display_item_copy.display_data_channels[data_index], **display_item.get_display_layer_properties(i))
display_item_copy.title = _("Snapshot of ") + display_item.title
self.append_display_item(display_item_copy)
return display_item_copy
def get_display_item_copy_new(self, display_item: DisplayItem.DisplayItem) -> DisplayItem.DisplayItem:
display_item_copy = display_item.snapshot()
self.append_display_item(display_item_copy)
return display_item_copy
def append_connection(self, connection: Connection.Connection) -> None:
self._project.append_connection(connection)
def insert_connection(self, before_index: int, connection: Connection.Connection) -> None:
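        # append-then-reorder: capture the current uuid order, append the new connection, splice it into
        # the requested position, and rebuild the ordered list (presumably because the underlying project
        # container only appends new items at the end).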
uuid_order = save_item_order(self.__connections)
self.append_connection(connection)
insert_item_order(uuid_order, before_index, connection)
self.__connections = restore_item_order(self._project, uuid_order)
def remove_connection(self, connection: Connection.Connection) -> None:
connection.container.remove_connection(connection)
def __handle_connection_inserted(self, connection: Connection.Connection) -> None:
assert connection is not None
assert connection not in self.__connections
# insert in internal list
before_index = len(self.__connections)
self.__connections.append(connection)
# send notifications
self.notify_insert_item("connections", connection, before_index)
def __handle_connection_removed(self, connection: Connection.Connection) -> None:
# remove it from the persistent_storage
assert connection is not None
assert connection in self.__connections
index = self.__connections.index(connection)
self.notify_remove_item("connections", connection, index)
self.__connections.remove(connection)
def create_data_structure(self, *, structure_type: str=None, source=None):
return DataStructure.DataStructure(structure_type=structure_type, source=source)
def append_data_structure(self, data_structure: DataStructure.DataStructure) -> None:
self._project.append_data_structure(data_structure)
def insert_data_structure(self, before_index: int, data_structure: DataStructure.DataStructure) -> None:
uuid_order = save_item_order(self.__data_structures)
self.append_data_structure(data_structure)
insert_item_order(uuid_order, before_index, data_structure)
self.__data_structures = restore_item_order(self._project, uuid_order)
def remove_data_structure(self, data_structure: DataStructure.DataStructure) -> None:
return self.__cascade_delete(data_structure).close()
def remove_data_structure_with_log(self, data_structure: DataStructure.DataStructure) -> Changes.UndeleteLog:
return self.__cascade_delete(data_structure)
def __handle_data_structure_inserted(self, data_structure: DataStructure.DataStructure) -> None:
assert data_structure is not None
assert data_structure not in self.__data_structures
# insert in internal list
before_index = len(self.__data_structures)
self.__data_structures.append(data_structure)
# listeners
def rebuild_transactions(): self.__transaction_manager._rebuild_transactions()
self.__data_structure_listeners[data_structure] = data_structure.data_structure_objects_changed_event.listen(rebuild_transactions)
# transactions
self.__transaction_manager._add_item(data_structure)
# send notifications
self.notify_insert_item("data_structures", data_structure, before_index)
def __handle_data_structure_removed(self, data_structure: DataStructure.DataStructure) -> None:
# remove it from the persistent_storage
assert data_structure is not None
assert data_structure in self.__data_structures
# listeners
self.__data_structure_listeners[data_structure].close()
self.__data_structure_listeners.pop(data_structure, None)
# transactions
self.__transaction_manager._remove_item(data_structure)
index = self.__data_structures.index(data_structure)
# notifications
self.notify_remove_item("data_structures", data_structure, index)
# remove from internal list
self.__data_structures.remove(data_structure)
def attach_data_structure(self, data_structure, data_item):
data_structure.source = data_item
def get_data_item_computation(self, data_item: DataItem.DataItem) -> typing.Optional[Symbolic.Computation]:
for computation in self.computations:
if data_item in computation.output_items:
target_object = computation.get_output("target")
if target_object == data_item:
return computation
return None
def set_data_item_computation(self, data_item: DataItem.DataItem, computation: typing.Optional[Symbolic.Computation]) -> None:
if data_item:
old_computation = self.get_data_item_computation(data_item)
if old_computation is computation:
pass
elif computation:
computation.create_output_item("target", Symbolic.make_item(data_item), label=_("Target"))
self.append_computation(computation)
elif old_computation:
# remove old computation without cascade (it would delete this data item itself)
old_computation.valid = False
old_computation.container.remove_computation(old_computation)
def append_computation(self, computation: Symbolic.Computation) -> None:
computation.pending_project = self._project # tell the computation where it will end up so get related item works
# input/output bookkeeping
input_items = computation.get_preliminary_input_items()
output_items = computation.get_preliminary_output_items()
input_set = set()
for input in input_items:
self.__get_deep_dependent_item_set(input, input_set)
output_set = set()
for output in output_items:
self.__get_deep_dependent_item_set(output, output_set)
if input_set.intersection(output_set):
computation.close()
raise Exception("Computation would result in duplicate dependency.")
self._project.append_computation(computation)
def insert_computation(self, before_index: int, computation: Symbolic.Computation) -> None:
uuid_order = save_item_order(self.__computations)
self.append_computation(computation)
insert_item_order(uuid_order, before_index, computation)
self.__computations = restore_item_order(self._project, uuid_order)
def remove_computation(self, computation: Symbolic.Computation, *, safe: bool=False) -> None:
self.__cascade_delete(computation, safe=safe).close()
def remove_computation_with_log(self, computation: Symbolic.Computation, *, safe: bool=False) -> Changes.UndeleteLog:
return self.__cascade_delete(computation, safe=safe)
def __handle_computation_inserted(self, computation: Symbolic.Computation) -> None:
assert computation is not None
assert computation not in self.__computations
# insert in internal list
before_index = len(self.__computations)
self.__computations.append(computation)
# listeners
self.__computation_changed_listeners[computation] = computation.computation_mutated_event.listen(functools.partial(self.__computation_changed, computation))
self.__computation_output_changed_listeners[computation] = computation.computation_output_changed_event.listen(functools.partial(self.__computation_update_dependencies, computation))
# send notifications
self.__computation_changed(computation) # ensure the initial mutation is reported
self.notify_insert_item("computations", computation, before_index)
def __handle_computation_removed(self, computation: Symbolic.Computation) -> None:
# remove it from the persistent_storage
assert computation is not None
assert computation in self.__computations
# remove it from any computation queues
with self.__computation_queue_lock:
computation_pending_queue = self.__computation_pending_queue
self.__computation_pending_queue = list()
for computation_queue_item in computation_pending_queue:
                if computation_queue_item.computation is not computation:
self.__computation_pending_queue.append(computation_queue_item)
if self.__computation_active_item and computation is self.__computation_active_item.computation:
self.__computation_active_item.valid = False
computation_changed_listener = self.__computation_changed_listeners.pop(computation, None)
if computation_changed_listener: computation_changed_listener.close()
computation_output_changed_listener = self.__computation_output_changed_listeners.pop(computation, None)
if computation_output_changed_listener: computation_output_changed_listener.close()
# notifications
index = self.__computations.index(computation)
self.notify_remove_item("computations", computation, index)
# remove from internal list
self.__computations.remove(computation)
def __computation_changed(self, computation):
# when the computation is mutated, this function is called. it calls the handle computation
# changed or mutated method to resolve computation variables and update dependencies between
# library objects. it also fires the computation_updated_event to allow the user interface
# to update.
# during updating of dependencies, this HUGE hack is in place to delay the computation changed
# messages until ALL of the dependencies are updated so as to avoid the computation changed message
# reestablishing dependencies during the updating of them. UGH. planning a better way...
if self.__computation_changed_delay_list is not None:
if computation not in self.__computation_changed_delay_list:
self.__computation_changed_delay_list.append(computation)
else:
self.__computation_update_dependencies(computation)
self.__computation_needs_update(computation)
self.computation_updated_event.fire(computation)
def __finish_computation_changed(self):
computation_changed_delay_list = self.__computation_changed_delay_list
self.__computation_changed_delay_list = None
for computation in computation_changed_delay_list:
self.__computation_changed(computation)
def __computation_update_dependencies(self, computation):
# when a computation output is changed, this function is called to establish dependencies.
# if other parts of the computation are changed (inputs, values, etc.), the __computation_changed
# will handle the change (and trigger a new computation).
input_items = set(computation.input_items)
output_items = set(computation.output_items)
self.__establish_computation_dependencies(computation._inputs, input_items, computation._outputs, output_items)
computation._inputs = input_items
computation._outputs = output_items
def __digest_requirement(self, requirement: typing.Mapping[str, typing.Any], data_item: DataItem.DataItem) -> bool:
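        # return True if data_item satisfies the given requirement dict (dispatched on its "type" key);
        # used by __make_computation to validate each source against the processing description.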
requirement_type = requirement["type"]
if requirement_type == "datum_rank":
values = requirement.get("values")
            if data_item.datum_dimension_count not in values:
return False
if requirement_type == "datum_calibrations":
if requirement.get("units") == "equal":
if len(set([calibration.units for calibration in data_item.xdata.datum_dimensional_calibrations])) != 1:
return False
if requirement_type == "dimensionality":
min_dimension = requirement.get("min")
max_dimension = requirement.get("max")
dimensionality = len(data_item.dimensional_shape)
if min_dimension is not None and dimensionality < min_dimension:
return False
if max_dimension is not None and dimensionality > max_dimension:
return False
if requirement_type == "is_rgb_type":
if not data_item.xdata.is_data_rgb_type:
return False
if requirement_type == "is_sequence":
if not data_item.is_sequence:
return False
if requirement_type == "is_navigable":
if not data_item.is_sequence and not data_item.is_collection:
return False
if requirement_type == "bool":
operator = requirement["operator"]
for operand in requirement["operands"]:
requirement_satisfied = self.__digest_requirement(operand, data_item)
if operator == "not":
return not requirement_satisfied
if operator == "and" and not requirement_satisfied:
return False
if operator == "or" and requirement_satisfied:
return True
else:
if operator == "or":
return False
return True
    def __make_computation(self, processing_id: str, inputs: typing.List[typing.Tuple[DisplayItem.DisplayItem, typing.Optional[DataItem.DataItem], typing.Optional[Graphics.Graphic]]], region_list_map: typing.Optional[typing.Mapping[str, typing.List[Graphics.Graphic]]]=None, parameters: typing.Optional[typing.Mapping[str, typing.Any]]=None) -> typing.Optional[DataItem.DataItem]:
"""Create a new data item with computation specified by processing_id, inputs, and region_list_map.
The region_list_map associates a list of graphics corresponding to the required regions with a computation source (key).
"""
region_list_map = region_list_map or dict()
parameters = parameters or dict()
processing_descriptions = Project.Project._processing_descriptions
processing_description = processing_descriptions[processing_id]
# first process the sources in the description. match them to the inputs (which are data item/crop graphic tuples)
src_dicts = processing_description.get("sources", list())
assert len(inputs) == len(src_dicts)
src_names = list()
src_texts = list()
src_labels = list()
regions = list()
region_map = dict()
for i, (src_dict, input) in enumerate(zip(src_dicts, inputs)):
display_item, data_item, _ = input
if not data_item:
return None
# each source can have a list of requirements, check through them
# implicit "and" connection between the requirements in the list. Could be changed to use the new
# boolean options, but leave it like this for backwards compatibility for now.
requirements = src_dict.get("requirements", list())
for requirement in requirements:
if not self.__digest_requirement(requirement, data_item):
return None
src_name = src_dict["name"]
src_label = src_dict["label"]
src_names.append(src_name)
src_texts.append(src_name)
src_labels.append(src_label)
# each source can have a list of regions to be matched to arguments or created on the source
region_dict_list = src_dict.get("regions", list())
src_region_list = region_list_map.get(src_name, list())
assert len(region_dict_list) == len(src_region_list)
for region_dict, region in zip(region_dict_list, src_region_list):
region_params = region_dict.get("params", dict())
region_type = region_dict["type"]
region_name = region_dict["name"]
region_label = region_params.get("label")
if region_type == "point":
if region:
assert isinstance(region, Graphics.PointGraphic)
point_region = region
else:
point_region = Graphics.PointGraphic()
for k, v in region_params.items():
setattr(point_region, k, v)
if display_item:
display_item.add_graphic(point_region)
regions.append((region_name, point_region, region_label))
region_map[region_name] = point_region
elif region_type == "line":
if region:
assert isinstance(region, Graphics.LineProfileGraphic)
line_region = region
else:
line_region = Graphics.LineProfileGraphic()
line_region.start = 0.25, 0.25
line_region.end = 0.75, 0.75
for k, v in region_params.items():
setattr(line_region, k, v)
if display_item:
display_item.add_graphic(line_region)
regions.append((region_name, line_region, region_params.get("label")))
region_map[region_name] = line_region
elif region_type == "rectangle":
if region:
assert isinstance(region, Graphics.RectangleGraphic)
rect_region = region
else:
rect_region = Graphics.RectangleGraphic()
rect_region.center = 0.5, 0.5
rect_region.size = 0.5, 0.5
for k, v in region_params.items():
setattr(rect_region, k, v)
if display_item:
display_item.add_graphic(rect_region)
regions.append((region_name, rect_region, region_params.get("label")))
region_map[region_name] = rect_region
elif region_type == "ellipse":
if region:
assert isinstance(region, Graphics.EllipseGraphic)
ellipse_region = region
else:
                        ellipse_region = Graphics.EllipseGraphic()
ellipse_region.center = 0.5, 0.5
ellipse_region.size = 0.5, 0.5
for k, v in region_params.items():
setattr(ellipse_region, k, v)
if display_item:
display_item.add_graphic(ellipse_region)
regions.append((region_name, ellipse_region, region_params.get("label")))
region_map[region_name] = ellipse_region
elif region_type == "spot":
if region:
assert isinstance(region, Graphics.SpotGraphic)
spot_region = region
else:
spot_region = Graphics.SpotGraphic()
spot_region.bounds = Geometry.FloatRect.from_center_and_size((0.25, 0.25), (0.25, 0.25))
for k, v in region_params.items():
setattr(spot_region, k, v)
if display_item:
display_item.add_graphic(spot_region)
regions.append((region_name, spot_region, region_params.get("label")))
region_map[region_name] = spot_region
elif region_type == "interval":
if region:
assert isinstance(region, Graphics.IntervalGraphic)
interval_graphic = region
else:
interval_graphic = Graphics.IntervalGraphic()
for k, v in region_params.items():
setattr(interval_graphic, k, v)
if display_item:
display_item.add_graphic(interval_graphic)
regions.append((region_name, interval_graphic, region_params.get("label")))
region_map[region_name] = interval_graphic
elif region_type == "channel":
if region:
assert isinstance(region, Graphics.ChannelGraphic)
channel_region = region
else:
channel_region = Graphics.ChannelGraphic()
for k, v in region_params.items():
setattr(channel_region, k, v)
if display_item:
display_item.add_graphic(channel_region)
regions.append((region_name, channel_region, region_params.get("label")))
region_map[region_name] = channel_region
# now extract the script (full script) or expression (implied imports and return statement)
script = None
expression = processing_description.get("expression")
if expression:
script = Symbolic.xdata_expression(expression)
script = script.format(**dict(zip(src_names, src_texts)))
# construct the computation
computation = self.create_computation(script)
computation.attributes.update(processing_description.get("attributes", dict()))
computation.label = processing_description["title"]
computation.processing_id = processing_id
# process the data item inputs
for src_dict, src_name, src_label, input in zip(src_dicts, src_names, src_labels, inputs):
in_display_item, data_item, graphic = input
secondary_item = None
if src_dict.get("croppable", False):
secondary_item = graphic
display_data_channel = in_display_item.get_display_data_channel_for_data_item(data_item)
computation.create_input_item(src_name, Symbolic.make_item(display_data_channel, secondary_item=secondary_item), label=src_label)
# process the regions
for region_name, region, region_label in regions:
computation.create_input_item(region_name, Symbolic.make_item(region), label=region_label)
# next process the parameters
for param_dict in processing_description.get("parameters", list()):
parameter_value = parameters.get(param_dict["name"], param_dict["value"])
computation.create_variable(param_dict["name"], param_dict["type"], parameter_value, value_default=param_dict.get("value_default"),
value_min=param_dict.get("value_min"), value_max=param_dict.get("value_max"),
control_type=param_dict.get("control_type"), label=param_dict.get("label"))
data_item0 = inputs[0][1]
new_data_item = DataItem.new_data_item()
prefix = "{} of ".format(processing_description["title"])
new_data_item.title = prefix + data_item0.title
new_data_item.category = data_item0.category
self.append_data_item(new_data_item)
new_display_item = self.get_display_item_for_data_item(new_data_item)
# next come the output regions that get created on the target itself
new_regions = dict()
for out_region_dict in processing_description.get("out_regions", list()):
region_type = out_region_dict["type"]
region_name = out_region_dict["name"]
region_params = out_region_dict.get("params", dict())
if region_type == "interval":
interval_graphic = Graphics.IntervalGraphic()
for k, v in region_params.items():
setattr(interval_graphic, k, v)
new_display_item.add_graphic(interval_graphic)
new_regions[region_name] = interval_graphic
elif region_type == "point":
point_graphic = Graphics.PointGraphic()
for k, v in region_params.items():
setattr(point_graphic, k, v)
new_display_item.add_graphic(point_graphic)
new_regions[region_name] = point_graphic
# save setting the computation until last to work around threaded clone/merge operation bug.
# the bug is that setting the computation triggers the recompute to occur on a thread.
# the recompute clones the data item and runs the operation. meanwhile this thread
# updates the connection. now the recompute finishes and merges back the data item
# which was cloned before the connection was established, effectively reversing the
# update that matched the graphic interval to the slice interval on the display.
# the result is that the slice interval on the display would get set to the default
# value of the graphic interval. so don't actually update the computation until after
# everything is configured. permanent solution would be to improve the clone/merge to
# only update data that had been changed. alternative implementation would only track
# changes to the data item and then apply them again to the original during merge.
self.set_data_item_computation(new_data_item, computation)
return new_data_item
_builtin_processing_descriptions = None
@classmethod
def register_processing_descriptions(cls, processing_descriptions: typing.Dict) -> None:
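        # merge externally defined processing descriptions into the project-wide table; the new ids must
        # not collide with already-registered ids (enforced by the assert below).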
assert len(set(Project.Project._processing_descriptions.keys()).intersection(set(processing_descriptions.keys()))) == 0
Project.Project._processing_descriptions.update(processing_descriptions)
@classmethod
def unregister_processing_descriptions(cls, processing_ids: typing.Sequence[str]):
assert len(set(Project.Project._processing_descriptions.keys()).intersection(set(processing_ids))) == len(processing_ids)
for processing_id in processing_ids:
Project.Project._processing_descriptions.pop(processing_id)
@classmethod
def _get_builtin_processing_descriptions(cls) -> typing.Dict:
if not cls._builtin_processing_descriptions:
vs = dict()
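            # each entry in vs describes one processing operation: a "title", an "expression" (or a
            # computation registered by a processing component), optional "sources" with per-source
            # "regions" and "requirements", optional "parameters", and optional "out_regions" created on
            # the target display item.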
requirement_2d = {"type": "dimensionality", "min": 2, "max": 2}
requirement_3d = {"type": "dimensionality", "min": 3, "max": 3}
requirement_4d = {"type": "dimensionality", "min": 4, "max": 4}
requirement_2d_to_3d = {"type": "dimensionality", "min": 2, "max": 3}
requirement_2d_to_4d = {"type": "dimensionality", "min": 2, "max": 4}
requirement_2d_to_5d = {"type": "dimensionality", "min": 2, "max": 5}
requirement_is_rgb_type = {"type": "is_rgb_type"}
requirement_is_sequence = {"type": "is_sequence"}
requirement_is_navigable = {"type": "is_navigable"}
requirement_is_not_sequence = {"type": "bool", "operator": "not", "operands": [requirement_is_sequence]}
requirement_4d_if_sequence_else_3d = {"type": "bool", "operator": "or",
"operands": [{"type": "bool", "operator": "and",
"operands": [requirement_is_not_sequence, requirement_3d]},
{"type": "bool", "operator": "and",
"operands": [requirement_is_sequence, requirement_4d]}]}
for processing_component in typing.cast(typing.Sequence[Processing.ProcessingBase], Registry.get_components_by_type("processing-component")):
processing_component.register_computation()
vs[processing_component.processing_id] = {
"title": processing_component.title,
"sources": processing_component.sources,
"parameters": processing_component.parameters,
"attributes": processing_component.attributes,
}
if processing_component.is_mappable and not processing_component.is_scalar:
mapping_param = {"name": "mapping", "label": _("Sequence/Collection Mapping"), "type": "string", "value": "none", "value_default": "none", "control_type": "choice"}
vs[processing_component.processing_id].setdefault("parameters", list()).insert(0, mapping_param)
if processing_component.is_mappable and processing_component.is_scalar:
map_out_region = {"name": "pick_point", "type": "point", "params": {"label": _("Pick"), "role": "collection_index"}}
vs[processing_component.processing_id]["out_regions"] = [map_out_region]
# TODO: generalize this so that other sequence/collections can be accepted by making a coordinate system monitor or similar
# TODO: processing should declare its relationship to input coordinate system and swift should automatically connect pickers
# TODO: in appropriate places.
vs[processing_component.processing_id]["requirements"] = [requirement_4d]
vs["fft"] = {"title": _("FFT"), "expression": "xd.fft({src}.cropped_display_xdata)", "sources": [{"name": "src", "label": _("Source"), "croppable": True}]}
vs["inverse-fft"] = {"title": _("Inverse FFT"), "expression": "xd.ifft({src}.xdata)",
"sources": [{"name": "src", "label": _("Source")}]}
vs["auto-correlate"] = {"title": _("Auto Correlate"), "expression": "xd.autocorrelate({src}.cropped_display_xdata)",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}]}
vs["cross-correlate"] = {"title": _("Cross Correlate"), "expression": "xd.crosscorrelate({src1}.cropped_display_xdata, {src2}.cropped_display_xdata)",
"sources": [{"name": "src1", "label": _("Source 1"), "croppable": True}, {"name": "src2", "label": _("Source 2"), "croppable": True}]}
vs["sobel"] = {"title": _("Sobel"), "expression": "xd.sobel({src}.cropped_display_xdata)",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}]}
vs["laplace"] = {"title": _("Laplace"), "expression": "xd.laplace({src}.cropped_display_xdata)",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}]}
sigma_param = {"name": "sigma", "label": _("Sigma"), "type": "real", "value": 3, "value_default": 3, "value_min": 0, "value_max": 100,
"control_type": "slider"}
vs["gaussian-blur"] = {"title": _("Gaussian Blur"), "expression": "xd.gaussian_blur({src}.cropped_display_xdata, sigma)",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}], "parameters": [sigma_param]}
filter_size_param = {"name": "filter_size", "label": _("Size"), "type": "integral", "value": 3, "value_default": 3, "value_min": 1, "value_max": 100}
vs["median-filter"] = {"title": _("Median Filter"), "expression": "xd.median_filter({src}.cropped_display_xdata, filter_size)",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}], "parameters": [filter_size_param]}
vs["uniform-filter"] = {"title": _("Uniform Filter"), "expression": "xd.uniform_filter({src}.cropped_display_xdata, filter_size)",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}], "parameters": [filter_size_param]}
do_transpose_param = {"name": "do_transpose", "label": _("Transpose"), "type": "boolean", "value": False, "value_default": False}
do_flip_v_param = {"name": "do_flip_v", "label": _("Flip Vertical"), "type": "boolean", "value": False, "value_default": False}
do_flip_h_param = {"name": "do_flip_h", "label": _("Flip Horizontal"), "type": "boolean", "value": False, "value_default": False}
vs["transpose-flip"] = {"title": _("Transpose/Flip"), "expression": "xd.transpose_flip({src}.cropped_display_xdata, do_transpose, do_flip_v, do_flip_h)",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}], "parameters": [do_transpose_param, do_flip_v_param, do_flip_h_param]}
width_param = {"name": "width", "label": _("Width"), "type": "integral", "value": 256, "value_default": 256, "value_min": 1}
height_param = {"name": "height", "label": _("Height"), "type": "integral", "value": 256, "value_default": 256, "value_min": 1}
vs["rebin"] = {"title": _("Rebin"), "expression": "xd.rebin_image({src}.cropped_display_xdata, (height, width))",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}], "parameters": [width_param, height_param]}
vs["resample"] = {"title": _("Resample"), "expression": "xd.resample_image({src}.cropped_display_xdata, (height, width))",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}], "parameters": [width_param, height_param]}
vs["resize"] = {"title": _("Resize"), "expression": "xd.resize({src}.cropped_display_xdata, (height, width), 'mean')",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}], "parameters": [width_param, height_param]}
is_sequence_param = {"name": "is_sequence", "label": _("Sequence"), "type": "bool", "value": False, "value_default": False}
collection_dims_param = {"name": "collection_dims", "label": _("Collection Dimensions"), "type": "integral", "value": 0, "value_default": 0, "value_min": 0, "value_max": 0}
datum_dims_param = {"name": "datum_dims", "label": _("Datum Dimensions"), "type": "integral", "value": 1, "value_default": 1, "value_min": 1, "value_max": 0}
vs["redimension"] = {"title": _("Redimension"), "expression": "xd.redimension({src}.xdata, xd.data_descriptor(is_sequence=is_sequence, collection_dims=collection_dims, datum_dims=datum_dims))",
"sources": [{"name": "src", "label": _("Source")}], "parameters": [is_sequence_param, collection_dims_param, datum_dims_param]}
vs["squeeze"] = {"title": _("Squeeze"), "expression": "xd.squeeze({src}.xdata)",
"sources": [{"name": "src", "label": _("Source")}]}
bins_param = {"name": "bins", "label": _("Bins"), "type": "integral", "value": 256, "value_default": 256, "value_min": 2}
vs["histogram"] = {"title": _("Histogram"), "expression": "xd.histogram({src}.cropped_display_xdata, bins)",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}], "parameters": [bins_param]}
vs["add"] = {"title": _("Add"), "expression": "{src1}.cropped_display_xdata + {src2}.cropped_display_xdata",
"sources": [{"name": "src1", "label": _("Source 1"), "croppable": True}, {"name": "src2", "label": _("Source 2"), "croppable": True}]}
vs["subtract"] = {"title": _("Subtract"), "expression": "{src1}.cropped_display_xdata - {src2}.cropped_display_xdata",
"sources": [{"name": "src1", "label": _("Source 1"), "croppable": True}, {"name": "src2", "label": _("Source 2"), "croppable": True}]}
vs["multiply"] = {"title": _("Multiply"), "expression": "{src1}.cropped_display_xdata * {src2}.cropped_display_xdata",
"sources": [{"name": "src1", "label": _("Source 1"), "croppable": True}, {"name": "src2", "label": _("Source 2"), "croppable": True}]}
vs["divide"] = {"title": _("Divide"), "expression": "{src1}.cropped_display_xdata / {src2}.cropped_display_xdata",
"sources": [{"name": "src1", "label": _("Source 1"), "croppable": True}, {"name": "src2", "label": _("Source 2"), "croppable": True}]}
vs["invert"] = {"title": _("Negate"), "expression": "xd.invert({src}.cropped_display_xdata)", "sources": [{"name": "src", "label": _("Source"), "croppable": True}]}
vs["masked"] = {"title": _("Masked"), "expression": "{src}.filtered_xdata", "sources": [{"name": "src", "label": _("Source")}]}
vs["mask"] = {"title": _("Mask"), "expression": "{src}.filter_xdata", "sources": [{"name": "src", "label": _("Source")}]}
vs["convert-to-scalar"] = {"title": _("Scalar"), "expression": "{src}.cropped_display_xdata",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}]}
vs["crop"] = {"title": _("Crop"), "expression": "{src}.cropped_display_xdata",
"sources": [{"name": "src", "label": _("Source"), "croppable": True}]}
vs["sum"] = {"title": _("Sum"), "expression": "xd.sum({src}.cropped_xdata, {src}.xdata.datum_dimension_indexes[0])",
"sources": [{"name": "src", "label": _("Source"), "croppable": True, "requirements": [requirement_2d_to_4d]}]}
slice_center_param = {"name": "center", "label": _("Center"), "type": "integral", "value": 0, "value_default": 0, "value_min": 0}
slice_width_param = {"name": "width", "label": _("Width"), "type": "integral", "value": 1, "value_default": 1, "value_min": 1}
vs["slice"] = {"title": _("Slice"), "expression": "xd.slice_sum({src}.cropped_xdata, center, width)",
"sources": [{"name": "src", "label": _("Source"), "croppable": True, "requirements": [requirement_3d]}],
"parameters": [slice_center_param, slice_width_param]}
pick_in_region = {"name": "pick_region", "type": "point", "params": {"label": _("Pick Point")}}
pick_out_region = {"name": "interval_region", "type": "interval", "params": {"label": _("Display Slice"), "role": "slice"}}
vs["pick-point"] = {"title": _("Pick"), "expression": "xd.pick({src}.xdata, pick_region.position)",
"sources": [{"name": "src", "label": _("Source"), "regions": [pick_in_region], "requirements": [requirement_4d_if_sequence_else_3d]}],
"out_regions": [pick_out_region]}
pick_sum_in_region = {"name": "region", "type": "rectangle", "params": {"label": _("Pick Region")}}
pick_sum_out_region = {"name": "interval_region", "type": "interval", "params": {"label": _("Display Slice"), "role": "slice"}}
vs["pick-mask-sum"] = {"title": _("Pick Sum"), "expression": "xd.sum_region({src}.xdata, region.mask_xdata_with_shape({src}.xdata.data_shape[-3:-1]))",
"sources": [{"name": "src", "label": _("Source"), "regions": [pick_sum_in_region], "requirements": [requirement_4d_if_sequence_else_3d]}],
"out_regions": [pick_sum_out_region]}
vs["pick-mask-average"] = {"title": _("Pick Average"), "expression": "xd.average_region({src}.xdata, region.mask_xdata_with_shape({src}.xdata.data_shape[-3:-1]))",
"sources": [{"name": "src", "label": _("Source"), "regions": [pick_sum_in_region], "requirements": [requirement_4d_if_sequence_else_3d]}],
"out_regions": [pick_sum_out_region]}
vs["subtract-mask-average"] = {"title": _("Subtract Average"), "expression": "{src}.xdata - xd.average_region({src}.xdata, region.mask_xdata_with_shape({src}.xdata.data_shape[0:2]))",
"sources": [{"name": "src", "label": _("Source"), "regions": [pick_sum_in_region], "requirements": [requirement_3d]}],
"out_regions": [pick_sum_out_region]}
line_profile_in_region = {"name": "line_region", "type": "line", "params": {"label": _("Line Profile")}}
vs["line-profile"] = {"title": _("Line Profile"), "expression": "xd.line_profile(xd.absolute({src}.element_xdata) if {src}.element_xdata.is_data_complex_type else {src}.element_xdata, line_region.vector, line_region.line_width)",
"sources": [{"name": "src", "label": _("Source"), "regions": [line_profile_in_region]}]}
vs["filter"] = {"title": _("Filter"), "expression": "xd.real(xd.ifft({src}.filtered_xdata))",
"sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_2d]}]}
vs["sequence-register"] = {"title": _("Shifts"), "expression": "xd.sequence_squeeze_measurement(xd.sequence_measure_relative_translation({src}.xdata, {src}.xdata[numpy.unravel_index(0, {src}.xdata.navigation_dimension_shape)], 100))",
"sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_2d_to_3d]}]}
vs["sequence-align"] = {"title": _("Alignment"), "expression": "xd.sequence_align({src}.xdata, 100)",
"sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_2d_to_5d, requirement_is_navigable]}]}
vs["sequence-fourier-align"] = {"title": _("Alignment"), "expression": "xd.sequence_fourier_align({src}.xdata, 100)",
"sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_2d_to_5d, requirement_is_navigable]}]}
vs["sequence-integrate"] = {"title": _("Integrate"), "expression": "xd.sequence_integrate({src}.xdata)",
"sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_is_sequence]}]}
trim_start_param = {"name": "start", "label": _("Start"), "type": "integral", "value": 0, "value_default": 0, "value_min": 0}
trim_end_param = {"name": "end", "label": _("End"), "type": "integral", "value": 1, "value_default": 1, "value_min": 1}
vs["sequence-trim"] = {"title": _("Trim"), "expression": "xd.sequence_trim({src}.xdata, start, end)",
"sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_is_sequence]}],
"parameters": [trim_start_param, trim_end_param]}
index_param = {"name": "index", "label": _("Index"), "type": "integral", "value": 1, "value_default": 1, "value_min": 1}
vs["sequence-extract"] = {"title": _("Extract"), "expression": "xd.sequence_extract({src}.xdata, index)",
"sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_is_sequence]}],
"parameters": [index_param]}
vs["make-rgb"] = {"title": _("RGB"), "expression": "xd.rgb({src_red}.cropped_transformed_xdata, {src_green}.cropped_transformed_xdata, {src_blue}.cropped_transformed_xdata)",
"sources": [{"name": "src_red", "label": _("Red"), "croppable": True, "requirements": [requirement_2d]},
{"name": "src_green", "label": _("Green"), "croppable": True, "requirements": [requirement_2d]},
{"name": "src_blue", "label": _("Blue"), "croppable": True, "requirements": [requirement_2d]}]}
vs["extract-luminance"] = {"title": _("Luminance"), "expression": "xd.luminance({src}.display_rgba)", "sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_is_rgb_type]}]}
vs["extract-red"] = {"title": _("Red"), "expression": "xd.red({src}.display_rgba)", "sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_is_rgb_type]}]}
vs["extract-green"] = {"title": _("Green"), "expression": "xd.green({src}.display_rgba)", "sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_is_rgb_type]}]}
vs["extract-blue"] = {"title": _("Blue"), "expression": "xd.blue({src}.display_rgba)", "sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_is_rgb_type]}]}
vs["extract-alpha"] = {"title": _("Alpha"), "expression": "xd.alpha({src}.display_rgba)", "sources": [{"name": "src", "label": _("Source"), "requirements": [requirement_is_rgb_type]}]}
cls._builtin_processing_descriptions = vs
return cls._builtin_processing_descriptions
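# Note added for clarity (not part of the registry built above): every entry in `vs`
# follows the same schema -- "title" (the menu label), "expression" (an xdata expression
# with `{src}`-style source substitution), "sources" (a list of dicts with "name" and
# "label", plus optional "croppable", "regions" and "requirements"), and optional
# "parameters" and "out_regions". For example, the "slice" entry above pairs one
# croppable 3d source with the slice_center_param/slice_width_param parameters.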
def get_fft_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("fft", [(display_item, data_item, crop_region)])
def get_ifft_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("inverse-fft", [(display_item, data_item, crop_region)])
def get_auto_correlate_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("auto-correlate", [(display_item, data_item, crop_region)])
def get_cross_correlate_new(self, display_item1: DisplayItem.DisplayItem, data_item1: DataItem.DataItem, display_item2: DisplayItem.DisplayItem, data_item2: DataItem.DataItem, crop_region1: Graphics.RectangleTypeGraphic=None, crop_region2: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("cross-correlate", [(display_item1, data_item1, crop_region1), (display_item2, data_item2, crop_region2)])
def get_sobel_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("sobel", [(display_item, data_item, crop_region)])
def get_laplace_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("laplace", [(display_item, data_item, crop_region)])
def get_gaussian_blur_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("gaussian-blur", [(display_item, data_item, crop_region)])
def get_median_filter_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("median-filter", [(display_item, data_item, crop_region)])
def get_uniform_filter_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("uniform-filter", [(display_item, data_item, crop_region)])
def get_transpose_flip_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("transpose-flip", [(display_item, data_item, crop_region)])
def get_rebin_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("rebin", [(display_item, data_item, crop_region)])
def get_resample_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("resample", [(display_item, data_item, crop_region)])
def get_resize_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("resize", [(display_item, data_item, crop_region)])
def get_redimension_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, data_descriptor: DataAndMetadata.DataDescriptor) -> DataItem.DataItem:
return self.__make_computation("redimension", [(display_item, data_item, None)], parameters={"is_sequence": data_descriptor.is_sequence, "collection_dims": data_descriptor.collection_dimension_count, "datum_dims": data_descriptor.datum_dimension_count})
def get_squeeze_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem) -> DataItem.DataItem:
return self.__make_computation("squeeze", [(display_item, data_item, None)])
def get_histogram_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("histogram", [(display_item, data_item, crop_region)])
def get_add_new(self, display_item1: DisplayItem.DisplayItem, data_item1: DataItem.DataItem, display_item2: DisplayItem.DisplayItem, data_item2: DataItem.DataItem, crop_region1: Graphics.RectangleTypeGraphic=None, crop_region2: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("add", [(display_item1, data_item1, crop_region1), (display_item2, data_item2, crop_region2)])
def get_subtract_new(self, display_item1: DisplayItem.DisplayItem, data_item1: DataItem.DataItem, display_item2: DisplayItem.DisplayItem, data_item2: DataItem.DataItem, crop_region1: Graphics.RectangleTypeGraphic=None, crop_region2: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("subtract", [(display_item1, data_item1, crop_region1), (display_item2, data_item2, crop_region2)])
def get_multiply_new(self, display_item1: DisplayItem.DisplayItem, data_item1: DataItem.DataItem, display_item2: DisplayItem.DisplayItem, data_item2: DataItem.DataItem, crop_region1: Graphics.RectangleTypeGraphic=None, crop_region2: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("multiply", [(display_item1, data_item1, crop_region1), (display_item2, data_item2, crop_region2)])
def get_divide_new(self, display_item1: DisplayItem.DisplayItem, data_item1: DataItem.DataItem, display_item2: DisplayItem.DisplayItem, data_item2: DataItem.DataItem, crop_region1: Graphics.RectangleTypeGraphic=None, crop_region2: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("divide", [(display_item1, data_item1, crop_region1), (display_item2, data_item2, crop_region2)])
def get_invert_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("invert", [(display_item, data_item, crop_region)])
def get_masked_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("masked", [(display_item, data_item, crop_region)])
def get_mask_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("mask", [(display_item, data_item, crop_region)])
def get_convert_to_scalar_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("convert-to-scalar", [(display_item, data_item, crop_region)])
def get_crop_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
if data_item and display_item and not crop_region:
if data_item.is_data_2d:
rect_region = Graphics.RectangleGraphic()
rect_region.center = 0.5, 0.5
rect_region.size = 0.5, 0.5
display_item.add_graphic(rect_region)
crop_region = rect_region
elif data_item.is_data_1d:
interval_region = Graphics.IntervalGraphic()
interval_region.interval = 0.25, 0.75
display_item.add_graphic(interval_region)
crop_region = interval_region
return self.__make_computation("crop", [(display_item, data_item, crop_region)])
def get_projection_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("sum", [(display_item, data_item, crop_region)])
def get_slice_sum_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("slice", [(display_item, data_item, crop_region)])
def get_pick_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None, pick_region: Graphics.PointTypeGraphic=None) -> DataItem.DataItem:
data_item = self.__make_computation("pick-point", [(display_item, data_item, crop_region)], {"src": [pick_region]})
if data_item:
display_data_channel = display_item.display_data_channels[0]
if display_data_channel.slice_center == 0 and display_data_channel.slice_width == 1:
display_data_channel.slice_interval = (0.05, 0.15)
return data_item
def get_pick_region_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None, pick_region: Graphics.Graphic=None) -> DataItem.DataItem:
data_item = self.__make_computation("pick-mask-sum", [(display_item, data_item, crop_region)], {"src": [pick_region]})
if data_item:
display_data_channel = display_item.display_data_channels[0]
if display_data_channel.slice_center == 0 and display_data_channel.slice_width == 1:
display_data_channel.slice_interval = (0.05, 0.15)
return data_item
def get_pick_region_average_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None, pick_region: Graphics.Graphic=None) -> DataItem.DataItem:
data_item = self.__make_computation("pick-mask-average", [(display_item, data_item, crop_region)], {"src": [pick_region]})
if data_item:
display_data_channel = display_item.display_data_channels[0]
if display_data_channel.slice_center == 0 and display_data_channel.slice_width == 1:
display_data_channel.slice_interval = (0.05, 0.15)
return data_item
def get_subtract_region_average_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None, pick_region: Graphics.Graphic=None) -> DataItem.DataItem:
return self.__make_computation("subtract-mask-average", [(display_item, data_item, crop_region)], {"src": [pick_region]})
def get_line_profile_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None, line_region: Graphics.LineTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("line-profile", [(display_item, data_item, crop_region)], {"src": [line_region]})
def get_fourier_filter_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
data_item = display_item.data_item
if data_item and display_item:
has_mask = False
for graphic in display_item.graphics:
if isinstance(graphic, (Graphics.SpotGraphic, Graphics.WedgeGraphic, Graphics.RingGraphic, Graphics.LatticeGraphic)):
has_mask = True
break
if not has_mask:
graphic = Graphics.RingGraphic()
graphic.radius_1 = 0.15
graphic.radius_2 = 0.25
display_item.add_graphic(graphic)
return self.__make_computation("filter", [(display_item, data_item, crop_region)])
def get_processing_new(self, processing_id: str, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation(processing_id, [(display_item, data_item, crop_region)])
def get_sequence_measure_shifts_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("sequence-register", [(display_item, data_item, crop_region)])
def get_sequence_align_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("sequence-align", [(display_item, data_item, crop_region)])
def get_sequence_fourier_align_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("sequence-fourier-align", [(display_item, data_item, crop_region)])
def get_sequence_integrate_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("sequence-integrate", [(display_item, data_item, crop_region)])
def get_sequence_trim_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("sequence-trim", [(display_item, data_item, crop_region)])
def get_sequence_extract_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("sequence-extract", [(display_item, data_item, crop_region)])
def get_rgb_new(self, display_item1: DisplayItem.DisplayItem, data_item1: DataItem.DataItem, display_item2: DisplayItem.DisplayItem, data_item2: DataItem.DataItem, display_item3: DisplayItem.DisplayItem, data_item3: DataItem.DataItem, crop_region1: Graphics.RectangleTypeGraphic=None, crop_region2: Graphics.RectangleTypeGraphic=None, crop_region3: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("make-rgb", [(display_item1, data_item1, crop_region1),
(display_item2, data_item2, crop_region2),
(display_item3, data_item3, crop_region3)])
def get_rgb_alpha_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("extract-alpha", [(display_item, data_item, crop_region)])
def get_rgb_blue_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("extract-blue", [(display_item, data_item, crop_region)])
def get_rgb_green_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("extract-green", [(display_item, data_item, crop_region)])
def get_rgb_luminance_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("extract-luminance", [(display_item, data_item, crop_region)])
def get_rgb_red_new(self, display_item: DisplayItem.DisplayItem, data_item: DataItem.DataItem, crop_region: Graphics.RectangleTypeGraphic=None) -> DataItem.DataItem:
return self.__make_computation("extract-red", [(display_item, data_item, crop_region)])
class ConnectPickDisplay(Observer.AbstractAction):
def __init__(self, document_model: DocumentModel, item_value: Observer.ItemValue):
self.__document_model = document_model
self.__implicit_dependency = None
self.__sequence_index_property_connector = None
self.__slice_interval_property_connector = None
if item_value and isinstance(item_value, tuple):
item_value = typing.cast(typing.Tuple[DisplayItem.DisplayDataChannel, typing.Sequence[Graphics.IntervalGraphic]], item_value)
if len(item_value) == 2 and item_value[0] and item_value[1]:
display_data_channel = item_value[0]
interval_graphics = item_value[1]
sequence_index_property_connector_items = list()
slice_interval_property_connector_items = list()
sequence_index_property_connector_items.append(Connector.PropertyConnectorItem(display_data_channel, "sequence_index"))
slice_interval_property_connector_items.append(Connector.PropertyConnectorItem(display_data_channel, "slice_interval"))
for interval_graphic in interval_graphics:
slice_interval_property_connector_items.append(Connector.PropertyConnectorItem(interval_graphic, "interval"))
for interval_display_data_channel in typing.cast(typing.Sequence[DisplayItem.DisplayDataChannel], interval_graphic.container.display_data_channels):
sequence_index_property_connector_items.append(Connector.PropertyConnectorItem(interval_display_data_channel, "sequence_index"))
self.__sequence_index_property_connector = Connector.PropertyConnector(sequence_index_property_connector_items)
self.__slice_interval_property_connector = Connector.PropertyConnector(slice_interval_property_connector_items)
self.__implicit_dependency = ImplicitDependency(interval_graphics, display_data_channel)
document_model.register_implicit_dependency(self.__implicit_dependency)
def close(self) -> None:
if self.__sequence_index_property_connector:
self.__sequence_index_property_connector.close()
self.__sequence_index_property_connector = None
if self.__slice_interval_property_connector:
self.__slice_interval_property_connector.close()
self.__slice_interval_property_connector = None
if self.__implicit_dependency:
self.__document_model.unregister_implicit_dependency(self.__implicit_dependency)
class ImplicitPickConnection:
"""Facilitate connections between a sequence/collection of 1D data and a line plot from a pick-style computation.
When the sequence/collection slice interval changes, update the line plot display slice interval (if present).
When the line plot display slice interval changes, update the sequence/collection slice interval.
When the sequence/collection sequence index changes, update the line plot sequence index.
When the line plot sequence index changes, update the sequence/collection sequence index.
"""
def __init__(self, document_model: DocumentModel):
def match_pick(computation: Symbolic.Computation) -> bool:
return computation.processing_id in ("pick-point", "pick-mask-sum", "pick-mask-average", "subtract-mask-average")
def match_graphic(graphic: Graphics.Graphic) -> bool:
return graphic.role == "slice"
# use an observer builder to construct the observer
oo = Observer.ObserverBuilder()
# match the pick-style computation
matched_computations = oo.source(document_model).sequence_from_array("computations", predicate=match_pick)
# select the _display_data_channel of the bound_item of the first computation input variable. this observer is
# created as a sub-observer (x) and will be applied to each item from the container (computations).
computation_display_data_channel = oo.x.ordered_sequence_from_array("variables").index(0).prop("bound_item").get("_display_data_channel")
# select the _data_item of the bound_item of the first computation output variable. this observer is created as a
# sub-observer (x) and will serve as the base for the further selection of the display items.
computation_result_data_item = oo.x.ordered_sequence_from_array("results").index(0).prop("bound_item").get("_data_item")
# select the display_items from each of the display data channels from each of the data items. this serves as
# the base for further selection of the interval graphics.
computation_result_display_items = computation_result_data_item.sequence_from_set("display_data_channels").map(oo.x.prop("display_item"))
# select the graphics items of the container object (display items) and collect them into a list. this observer
# is created as a sub-observer (x) and will be applied to each item from the container (display items).
slice_interval_graphic = oo.x.sequence_from_array("graphics", predicate=match_graphic).collect_list()
# select the graphics as a list from each display item and then further collect into a list and flatten that
# list.
computation_result_graphics = computation_result_display_items.map(slice_interval_graphic).collect_list().flatten()
# create the action to connect the various properties. this will be recreated whenever its inputs change.
connect_action = typing.cast(typing.Callable[[Observer.ItemValue], Observer.AbstractAction], functools.partial(ConnectPickDisplay, document_model))
# configure the action (connecting the properties) as each tuple is produced from the matching computations.
matched_computations.for_each(oo.x.tuple(computation_display_data_channel, computation_result_graphics).action(connect_action))
# finally, construct the observer and save it.
self.__observer = oo.make_observable()
def close(self) -> None:
self.__observer.close()
class ConnectMapDisplay(Observer.AbstractAction):
def __init__(self, document_model: DocumentModel, item_value: Observer.ItemValue):
self.__document_model = document_model
self.__implicit_dependency = None
self.__sequence_index_property_connector = None
self.__slice_interval_property_connector = None
if item_value and isinstance(item_value, tuple):
item_value = typing.cast(typing.Tuple[DisplayItem.DisplayDataChannel, typing.Sequence[Graphics.PointGraphic]], item_value)
if len(item_value) == 2 and item_value[0] and item_value[1]:
display_data_channel = item_value[0]
point_graphics = item_value[1]
sequence_index_property_connector_items = list()
collection_point_property_connector_items = list()
sequence_index_property_connector_items.append(Connector.PropertyConnectorItem(display_data_channel, "sequence_index"))
collection_point_property_connector_items.append(Connector.PropertyConnectorItem(display_data_channel, "collection_point"))
for point_graphic in point_graphics:
collection_point_property_connector_items.append(Connector.PropertyConnectorItem(point_graphic, "position"))
for interval_display_data_channel in typing.cast(typing.Sequence[DisplayItem.DisplayDataChannel], point_graphic.container.display_data_channels):
sequence_index_property_connector_items.append(Connector.PropertyConnectorItem(interval_display_data_channel, "sequence_index"))
self.__sequence_index_property_connector = Connector.PropertyConnector(sequence_index_property_connector_items)
self.__slice_interval_property_connector = Connector.PropertyConnector(collection_point_property_connector_items)
self.__implicit_dependency = ImplicitDependency(point_graphics, display_data_channel)
document_model.register_implicit_dependency(self.__implicit_dependency)
def close(self) -> None:
if self.__sequence_index_property_connector:
self.__sequence_index_property_connector.close()
self.__sequence_index_property_connector = None
if self.__slice_interval_property_connector:
self.__slice_interval_property_connector.close()
self.__slice_interval_property_connector = None
if self.__implicit_dependency:
self.__document_model.unregister_implicit_dependency(self.__implicit_dependency)
class ImplicitMapConnection:
def __init__(self, document_model: DocumentModel):
def match_pick(computation: Symbolic.Computation) -> bool:
if computation.get_computation_attribute("connection_type", None) == "map":
return True
if DocumentModel._builtin_processing_descriptions.get(computation.processing_id, dict()).get("attributes", dict()).get("connection_type", None) == "map":
return True
return False
def match_graphic(graphic: Graphics.Graphic) -> bool:
return graphic.role == "collection_index"
oo = Observer.ObserverBuilder()
matched_computations = oo.source(document_model).sequence_from_array("computations", predicate=match_pick)
computation_display_data_channel = oo.x.ordered_sequence_from_array("variables").index(0).prop("bound_item").get("_display_data_channel")
computation_result_data_item = oo.x.ordered_sequence_from_array("results").index(0).prop("bound_item").get("_data_item")
computation_result_display_items = computation_result_data_item.sequence_from_set("display_data_channels").map(oo.x.prop("display_item"))
slice_interval_graphic = oo.x.sequence_from_array("graphics", predicate=match_graphic).collect_list()
computation_result_graphics = computation_result_display_items.map(slice_interval_graphic).collect_list().flatten()
connect_action = typing.cast(typing.Callable[[Observer.ItemValue], Observer.AbstractAction], functools.partial(ConnectMapDisplay, document_model))
matched_computations.for_each(oo.x.tuple(computation_display_data_channel, computation_result_graphics).action(connect_action))
self.__observer = oo.make_observable()
def close(self) -> None:
self.__observer.close()
class IntervalListConnector(Observer.AbstractAction):
def __init__(self, document_model: DocumentModel, item_value: Observer.ItemValue):
self.__document_model = document_model
self.__listeners = list()
self.__implicit_dependency = None
if item_value and isinstance(item_value, tuple):
item_value = typing.cast(typing.Tuple[Graphics.LineProfileGraphic, typing.Sequence[Graphics.IntervalGraphic]], item_value)
if len(item_value) == 2 and item_value[0] and item_value[1] is not None:
line_profile_graphic = item_value[0]
interval_graphics = item_value[1]
def property_changed(key):
if key == "interval":
interval_descriptors = list()
for interval_graphic in interval_graphics:
interval_descriptor = {"interval": interval_graphic.interval, "color": "#F00"}
interval_descriptors.append(interval_descriptor)
line_profile_graphic.interval_descriptors = interval_descriptors
for interval_graphic in interval_graphics:
self.__listeners.append(interval_graphic.property_changed_event.listen(property_changed))
property_changed("interval")
self.__implicit_dependency = ImplicitDependency(interval_graphics, line_profile_graphic)
document_model.register_implicit_dependency(self.__implicit_dependency)
def close(self) -> None:
for listener in self.__listeners:
listener.close()
if self.__implicit_dependency:
self.__document_model.unregister_implicit_dependency(self.__implicit_dependency)
self.__listeners = None
class ImplicitLineProfileIntervalsConnection:
def __init__(self, document_model: DocumentModel):
def match_line_profile(computation: Symbolic.Computation) -> bool:
return computation.processing_id in ("line-profile",)
def match_graphic(graphic: Graphics.Graphic) -> bool:
return isinstance(graphic, Graphics.IntervalGraphic)
oo = Observer.ObserverBuilder()
matched_computations = oo.source(document_model).sequence_from_array("computations", predicate=match_line_profile)
computation_display_data_channel = oo.x.ordered_sequence_from_array("variables").index(1).prop("bound_item").get("_graphic")
interval_graphics = oo.x.sequence_from_array("graphics", predicate=match_graphic).collect_list()
computation_result_data_item = oo.x.ordered_sequence_from_array("results").index(0).prop("bound_item").get("_data_item")
computation_result_display_items = computation_result_data_item.sequence_from_set("display_data_channels").map(oo.x.prop("display_item"))
computation_result_graphics = computation_result_display_items.map(interval_graphics).collect_list().flatten()
connect_action = typing.cast(typing.Callable[[Observer.ItemValue], Observer.AbstractAction], functools.partial(IntervalListConnector, document_model))
matched_computations.for_each(oo.x.tuple(computation_display_data_channel, computation_result_graphics).action(connect_action))
self.__observer = oo.make_observable()
def close(self) -> None:
self.__observer.close()
DocumentModel.register_processing_descriptions(DocumentModel._get_builtin_processing_descriptions())
def evaluate_data(computation) -> DataAndMetadata.DataAndMetadata:
api = PlugInManager.api_broker_fn("~1.0", None)
data_item = DataItem.new_data_item(None)
with contextlib.closing(data_item):
api_data_item = api._new_api_object(data_item)
if computation.expression:
error_text = computation.evaluate_with_target(api, api_data_item)
computation.error_text = error_text
return api_data_item.data_and_metadata
else:
compute_obj, error_text = computation.evaluate(api)
compute_obj.commit()
computation.error_text = error_text
return computation.get_output("target").xdata
|
goodwinnk/intellij-community
|
refs/heads/master
|
python/helpers/py2only/docutils/parsers/rst/languages/zh_tw.py
|
128
|
# -*- coding: utf-8 -*-
# $Id: zh_tw.py 7119 2011-09-02 13:00:23Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Traditional Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'attention (translation required)': 'attention',
'caution (translation required)': 'caution',
'code (translation required)': 'code',
'danger (translation required)': 'danger',
'error (translation required)': 'error',
'hint (translation required)': 'hint',
'important (translation required)': 'important',
'note (translation required)': 'note',
'tip (translation required)': 'tip',
'warning (translation required)': 'warning',
'admonition (translation required)': 'admonition',
'sidebar (translation required)': 'sidebar',
'topic (translation required)': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubric (translation required)': 'rubric',
'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote',
'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'questions (translation required)': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'list-table (translation required)': 'list-table',
#'qa (translation required)': 'questions',
#'faq (translation required)': 'questions',
'meta (translation required)': 'meta',
'math (translation required)': 'math',
#'imagemap (translation required)': 'imagemap',
'image (translation required)': 'image',
'figure (translation required)': 'figure',
'include (translation required)': 'include',
'raw (translation required)': 'raw',
'replace (translation required)': 'replace',
'unicode (translation required)': 'unicode',
u'日期': 'date',
'class (translation required)': 'class',
'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'contents (translation required)': 'contents',
'sectnum (translation required)': 'sectnum',
'section-numbering (translation required)': 'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#'footnotes (translation required)': 'footnotes',
#'citations (translation required)': 'citations',
'target-notes (translation required)': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Traditional Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
'abbreviation (translation required)': 'abbreviation',
'ab (translation required)': 'abbreviation',
'acronym (translation required)': 'acronym',
'ac (translation required)': 'acronym',
u'code (translation required)': 'code',
'index (translation required)': 'index',
'i (translation required)': 'index',
'subscript (translation required)': 'subscript',
'sub (translation required)': 'subscript',
'superscript (translation required)': 'superscript',
'sup (translation required)': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
't (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'pep (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'rfc (translation required)': 'rfc-reference',
'emphasis (translation required)': 'emphasis',
'strong (translation required)': 'strong',
'literal (translation required)': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'target (translation required)': 'target',
'uri-reference (translation required)': 'uri-reference',
'uri (translation required)': 'uri-reference',
'url (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Traditional Chinese role names to canonical role names for
interpreted text."""
|
lubosz/cerbero
|
refs/heads/archlinux
|
cerbero/commands/fetch.py
|
12
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.commands import Command, register_command
from cerbero.build.cookbook import CookBook
from cerbero.packages.packagesstore import PackagesStore
from cerbero.utils import _, N_, ArgparseArgument, remove_list_duplicates
from cerbero.utils import messages as m
class Fetch(Command):
def __init__(self, args=[]):
args.append(ArgparseArgument('--reset-rdeps', action='store_true',
default=False, help=_('reset the status of reverse '
'dependencies too')))
args.append(ArgparseArgument('--full-reset', action='store_true',
default=False, help=_('reset to extract step if rebuild is needed')))
Command.__init__(self, args)
def fetch(self, cookbook, recipes, no_deps, reset_rdeps, full_reset):
fetch_recipes = []
if not recipes:
fetch_recipes = cookbook.get_recipes_list()
elif no_deps:
fetch_recipes = [cookbook.get_recipe(x) for x in recipes]
else:
for recipe in recipes:
fetch_recipes += cookbook.list_recipe_deps(recipe)
fetch_recipes = remove_list_duplicates(fetch_recipes)
m.message(_("Fetching the following recipes: %s") %
' '.join([x.name for x in fetch_recipes]))
to_rebuild = []
for i in range(len(fetch_recipes)):
recipe = fetch_recipes[i]
m.build_step(i + 1, len(fetch_recipes), recipe, 'Fetch')
recipe.fetch()
bv = cookbook.recipe_built_version(recipe.name)
cv = recipe.built_version()
if bv != cv:
# On different versions, only reset recipe if:
# * forced
# * OR it was fully built already
if full_reset or not cookbook.recipe_needs_build(recipe.name):
to_rebuild.append(recipe)
cookbook.reset_recipe_status(recipe.name)
if reset_rdeps:
for r in cookbook.list_recipe_reverse_deps(recipe.name):
to_rebuild.append(r)
cookbook.reset_recipe_status(r.name)
if to_rebuild:
to_rebuild = sorted(list(set(to_rebuild)), key=lambda r:r.name)
m.message(_("These recipes have been updated and will "
"be rebuilt:\n%s") %
'\n'.join([x.name for x in to_rebuild]))
class FetchRecipes(Fetch):
doc = N_('Fetch the recipes sources')
name = 'fetch'
def __init__(self):
args = [
ArgparseArgument('recipes', nargs='*',
help=_('list of the recipes to fetch (fetch all if none '
'is passed)')),
ArgparseArgument('--no-deps', action='store_true',
default=False, help=_('do not fetch dependencies')),
]
Fetch.__init__(self, args)
def run(self, config, args):
cookbook = CookBook(config)
return self.fetch(cookbook, args.recipes, args.no_deps,
args.reset_rdeps, args.full_reset)
class FetchPackage(Fetch):
doc = N_('Fetch the recipes sources from a package')
name = 'fetch-package'
def __init__(self):
args = [
ArgparseArgument('package', nargs=1,
help=_('package to fetch')),
]
Fetch.__init__(self, args)
def run(self, config, args):
store = PackagesStore(config)
package = store.get_package(args.package[0])
return self.fetch(store.cookbook, package.recipes_dependencies(),
True, args.reset_rdeps, args.full_reset)
register_command(FetchRecipes)
register_command(FetchPackage)
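# Illustrative note (not part of the original module): once registered above, these
# subcommands are reachable through cerbero's command-line front end, roughly as
#   <cerbero> fetch <recipe...> --no-deps --reset-rdeps
#   <cerbero> fetch-package <package> --full-reset
# where <cerbero>, <recipe...> and <package> are placeholders, and the flags are the
# ArgparseArguments declared by Fetch, FetchRecipes and FetchPackage above.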
|
nvoron23/socialite
|
refs/heads/master
|
jython/Lib/test/test_fnmatch.py
|
98
|
"""Test cases for the fnmatch module."""
from test import test_support
import unittest
from fnmatch import fnmatch, fnmatchcase
class FnmatchTestCase(unittest.TestCase):
def check_match(self, filename, pattern, should_match=1):
if should_match:
self.assert_(fnmatch(filename, pattern),
"expected %r to match pattern %r"
% (filename, pattern))
else:
self.assert_(not fnmatch(filename, pattern),
"expected %r not to match pattern %r"
% (filename, pattern))
def test_fnmatch(self):
check = self.check_match
check('abc', 'abc')
check('abc', '?*?')
check('abc', '???*')
check('abc', '*???')
check('abc', '???')
check('abc', '*')
check('abc', 'ab[cd]')
check('abc', 'ab[!de]')
check('abc', 'ab[de]', 0)
check('a', '??', 0)
check('a', 'b', 0)
# these test that '\' is handled correctly in character sets;
# see SF bug #???
check('\\', r'[\]')
check('a', r'[!\]')
check('\\', r'[!\]', 0)
def test_main():
test_support.run_unittest(FnmatchTestCase)
if __name__ == "__main__":
test_main()
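# Illustrative note (not part of the original test module): fnmatch() passes both
# arguments through os.path.normcase(), so matching is case-insensitive on platforms
# such as Windows, while fnmatchcase() always compares case-sensitively; for example
# fnmatchcase('ABC', 'abc') is False on every platform.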
|
googleads/google-ads-python
|
refs/heads/master
|
google/ads/googleads/v8/resources/types/click_view.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.common.types import click_location
from google.ads.googleads.v8.common.types import criteria
__protobuf__ = proto.module(
package="google.ads.googleads.v8.resources",
marshal="google.ads.googleads.v8",
manifest={"ClickView",},
)
class ClickView(proto.Message):
r"""A click view with metrics aggregated at each click level,
including both valid and invalid clicks. For non-Search
campaigns, metrics.clicks represents the number of valid and
invalid interactions. Queries including ClickView must have a
filter limiting the results to one day and can be requested for
dates back to 90 days before the time of the request.
Attributes:
resource_name (str):
Output only. The resource name of the click view. Click view
resource names have the form:
``customers/{customer_id}/clickViews/{date (yyyy-MM-dd)}~{gclid}``
gclid (str):
Output only. The Google Click ID.
area_of_interest (google.ads.googleads.v8.common.types.ClickLocation):
Output only. The location criteria matching
the area of interest associated with the
impression.
location_of_presence (google.ads.googleads.v8.common.types.ClickLocation):
Output only. The location criteria matching
the location of presence associated with the
impression.
page_number (int):
Output only. Page number in search results
where the ad was shown.
ad_group_ad (str):
Output only. The associated ad.
campaign_location_target (str):
Output only. The associated campaign location
target, if one exists.
user_list (str):
Output only. The associated user list, if one
exists.
keyword (str):
Output only. The associated keyword, if one
exists and the click corresponds to the SEARCH
channel.
keyword_info (google.ads.googleads.v8.common.types.KeywordInfo):
Output only. Basic information about the
associated keyword, if it exists.
"""
resource_name = proto.Field(proto.STRING, number=1,)
gclid = proto.Field(proto.STRING, number=8, optional=True,)
area_of_interest = proto.Field(
proto.MESSAGE, number=3, message=click_location.ClickLocation,
)
location_of_presence = proto.Field(
proto.MESSAGE, number=4, message=click_location.ClickLocation,
)
page_number = proto.Field(proto.INT64, number=9, optional=True,)
ad_group_ad = proto.Field(proto.STRING, number=10, optional=True,)
campaign_location_target = proto.Field(
proto.STRING, number=11, optional=True,
)
user_list = proto.Field(proto.STRING, number=12, optional=True,)
keyword = proto.Field(proto.STRING, number=13,)
keyword_info = proto.Field(
proto.MESSAGE, number=14, message=criteria.KeywordInfo,
)
__all__ = tuple(sorted(__protobuf__.manifest))
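# Illustrative sketch (not part of the generated module): proto-plus messages accept
# their fields as keyword arguments, so a ClickView can be built by hand for testing.
# The identifiers below are hypothetical placeholders.
if __name__ == "__main__":
    example_click_view = ClickView(
        resource_name="customers/1234567890/clickViews/2020-01-01~Cj0KCQexample",
        gclid="Cj0KCQexample",
        page_number=1,
    )
    print(example_click_view.resource_name)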
|
gilcierweb/simple-realtime-message
|
refs/heads/master
|
public/node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py
|
1788
|
#!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
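# Worked example of the surrogate formulae above (added comment, illustrative only):
# for U+1D306, high = (0x1D306 - 0x10000) // 0x400 + 0xD800 = 0xD834 and
# low = (0x1D306 - 0x10000) % 0x400 + 0xDC00 = 0xDF06, so unisymbol(0x1D306)
# returns the surrogate pair U+D834 U+DF06.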
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
|
ushatil/wellness-tracker
|
refs/heads/master
|
ws/wellspring/services/vest_service.py
|
1
|
import logging
from wellspring.models import VestSection, VestSubSection
LOGGER = logging.getLogger(__name__)
VEST_SECTIONS = {
"EQUILIBRIUM" : ["SCHOOL", "SELF", "HOME", "WORK"],
"SUPPORT" : ["PROFESSIONALS", "FAMILY", "FRIENDS", "COLLEAGUES"],
"LIFESTYLE" : ["DIET", "EXERCISE", "MEDITATION", "RECREATION"]
}
def add_section(name):
LOGGER.debug("Adding VestSection: " + name)
result = VestSection(section_name=name)
result.save()
return result
def add_subsection(section_name, subsection_name):
LOGGER.debug("Adding VestSubSection: " + section_name + ":" + subsection_name)
vest_section = get_by_name_vest_section(section_name)
result = VestSubSection(vest_section=vest_section, subsection_name=subsection_name)
result.save()
return result
def get_all_vest_section():
LOGGER.debug("Getting all VestSections")
return list(VestSection.objects.all())
def get_all_vest_subsection():
LOGGER.debug("Getting all VestSubSections")
return list(VestSubSection.objects.all())
def get_by_name_vest_section(name):
LOGGER.debug("Getting VestSection by name: " + name)
return VestSection.objects.get(section_name = name)
def get_by_name_vest_subsection(name):
LOGGER.debug("Getting VestSubSection by name: " + name)
return VestSubSection.objects.get(subsection_name = name)
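# Illustrative usage sketch (not part of the original module; assumes Django is
# configured and the wellspring migrations have been applied): populate the whole
# vest taxonomy from the VEST_SECTIONS mapping defined above.
def populate_vest_sections():
    for section_name, subsection_names in VEST_SECTIONS.items():
        add_section(section_name)
        for subsection_name in subsection_names:
            add_subsection(section_name, subsection_name)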
|
gogobook/wagtail
|
refs/heads/master
|
wagtail/contrib/wagtailapi/filters.py
|
20
|
from django.conf import settings
from rest_framework.filters import BaseFilterBackend
from taggit.managers import _TaggableManager
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.backends import get_search_backend
from .utils import BadRequestError, pages_for_site
class FieldsFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
This performs field level filtering on the result set
Eg: ?title=James Joyce
"""
fields = set(view.get_api_fields(queryset.model)).union({'id'})
for field_name, value in request.GET.items():
if field_name in fields:
field = getattr(queryset.model, field_name, None)
if isinstance(field, _TaggableManager):
for tag in value.split(','):
queryset = queryset.filter(**{field_name + '__name': tag})
# Stick a message on the queryset to indicate that tag filtering has been performed
# This will let the do_search method know that it must raise an error as searching
# and tag filtering at the same time is not supported
queryset._filtered_by_tag = True
else:
queryset = queryset.filter(**{field_name: value})
return queryset
class OrderingFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
This applies ordering to the result set
Eg: ?order=title
It also supports reverse ordering
Eg: ?order=-title
And random ordering
Eg: ?order=random
"""
if 'order' in request.GET:
# Prevent ordering while searching
if 'search' in request.GET:
raise BadRequestError("ordering with a search query is not supported")
order_by = request.GET['order']
# Random ordering
if order_by == 'random':
# Prevent ordering by random with offset
if 'offset' in request.GET:
raise BadRequestError("random ordering with offset is not supported")
return queryset.order_by('?')
# Check if reverse ordering is set
if order_by.startswith('-'):
reverse_order = True
order_by = order_by[1:]
else:
reverse_order = False
# Add ordering
if order_by == 'id' or order_by in view.get_api_fields(queryset.model):
queryset = queryset.order_by(order_by)
else:
# Unknown field
raise BadRequestError("cannot order by '%s' (unknown field)" % order_by)
# Reverse order
if reverse_order:
queryset = queryset.reverse()
return queryset
class SearchFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
This performs a full-text search on the result set
Eg: ?search=James Joyce
"""
search_enabled = getattr(settings, 'WAGTAILAPI_SEARCH_ENABLED', True)
if 'search' in request.GET:
if not search_enabled:
raise BadRequestError("search is disabled")
# Searching and filtering by tag at the same time is not supported
if getattr(queryset, '_filtered_by_tag', False):
raise BadRequestError("filtering by tag with a search query is not supported")
search_query = request.GET['search']
sb = get_search_backend()
queryset = sb.search(search_query, queryset)
return queryset
class ChildOfFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
if 'child_of' in request.GET:
try:
parent_page_id = int(request.GET['child_of'])
assert parent_page_id >= 0
except (ValueError, AssertionError):
raise BadRequestError("child_of must be a positive integer")
site_pages = pages_for_site(request.site)
try:
parent_page = site_pages.get(id=parent_page_id)
queryset = queryset.child_of(parent_page)
queryset._filtered_by_child_of = True
return queryset
except Page.DoesNotExist:
raise BadRequestError("parent page doesn't exist")
return queryset
class DescendantOfFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
if 'descendant_of' in request.GET:
if getattr(queryset, '_filtered_by_child_of', False):
raise BadRequestError("filtering by descendant_of with child_of is not supported")
try:
ancestor_page_id = int(request.GET['descendant_of'])
assert ancestor_page_id >= 0
except (ValueError, AssertionError):
raise BadRequestError("descendant_of must be a positive integer")
site_pages = pages_for_site(request.site)
try:
ancestor_page = site_pages.get(id=ancestor_page_id)
return queryset.descendant_of(ancestor_page)
except Page.DoesNotExist:
raise BadRequestError("ancestor page doesn't exist")
return queryset
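# Illustrative note (not part of the original module): these backends are intended to
# be combined in a DRF view's `filter_backends` list and are applied in order, so
# FieldsFilter should run before SearchFilter (it sets `_filtered_by_tag`) and
# ChildOfFilter before DescendantOfFilter (it sets `_filtered_by_child_of`).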
|
waynecoulson/TV-Show-Downloader
|
refs/heads/master
|
lib/tvdb_api/tvdb_ui.py
|
6
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
"""Contains included user interfaces for Tvdb show selection.
A UI is a callback: a class whose __init__ function takes two arguments:
- config, which is the Tvdb config dict, set up in tvdb_api.py
- log, which is Tvdb's logger instance (which uses the logging module). You can
call log.info(), log.warning(), etc.
It must have a method "selectSeries"; this is passed a list of dicts, each dict
containing the keys "name" (human readable show name) and "sid" (the show's
ID as on thetvdb.com). For example:
[{'name': u'Lost', 'sid': u'73739'},
{'name': u'Lost Universe', 'sid': u'73181'}]
The "selectSeries" method must return the appropriate dict, or it can raise
tvdb_userabort (if the selection is aborted) or tvdb_shownotfound (if the show
cannot be found).
A simple example callback, which returns a random series:
>>> import random
>>> from tvdb_ui import BaseUI
>>> class RandomUI(BaseUI):
... def selectSeries(self, allSeries):
... import random
... return random.choice(allSeries)
Then to use it..
>>> from tvdb_api import Tvdb
>>> t = Tvdb(custom_ui = RandomUI)
>>> random_matching_series = t['Lost']
>>> type(random_matching_series)
<class 'tvdb_api.Show'>
"""
__author__ = "dbr/Ben"
__version__ = "1.7.2"
import logging
import warnings
from tvdb_exceptions import tvdb_userabort
def log():
return logging.getLogger(__name__)
class BaseUI:
"""Default non-interactive UI, which auto-selects first results
"""
def __init__(self, config, log = None):
self.config = config
if log is not None:
warnings.warn("the UI's log parameter is deprecated, instead use\n"
"import logging; logging.getLogger('ui').info('blah')\n"
"The self.log attribute will be removed in the next version")
self.log = logging.getLogger(__name__)
def selectSeries(self, allSeries):
return allSeries[0]
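# A minimal sketch of a custom UI, following the callback contract described
# in the module docstring: selectSeries() must either return one of the
# candidate dicts or raise tvdb_userabort. The class name (ExactMatchUI) and
# the 'searchterm' config key are illustrative assumptions, not part of
# tvdb_api itself; a real implementation might raise tvdb_shownotfound
# instead when nothing matches.
class ExactMatchUI(BaseUI):
    """Non-interactive UI that only accepts an exact series-name match."""
    def selectSeries(self, allSeries):
        wanted = self.config.get('searchterm')
        for cshow in allSeries:
            if cshow['seriesname'] == wanted:
                return cshow
        raise tvdb_userabort("No exact match for %r" % (wanted,))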
class ConsoleUI(BaseUI):
"""Interactively allows the user to select a show from a console based UI
"""
def _displaySeries(self, allSeries, limit = 6):
"""Helper function, lists series with corresponding ID
"""
if limit is not None:
toshow = allSeries[:limit]
else:
toshow = allSeries
print "TVDB Search Results:"
for i, cshow in enumerate(toshow):
i_show = i + 1 # Start at more human readable number 1 (not 0)
log().debug('Showing allSeries[%s], series %s)' % (i_show, allSeries[i]['seriesname']))
if i == 0:
extra = " (default)"
else:
extra = ""
print "%s -> %s [%s] # http://thetvdb.com/?tab=series&id=%s&lid=%s%s" % (
i_show,
cshow['seriesname'].encode("UTF-8", "ignore"),
cshow['language'].encode("UTF-8", "ignore"),
str(cshow['id']),
cshow['lid'],
extra
)
def selectSeries(self, allSeries):
self._displaySeries(allSeries)
if len(allSeries) == 1:
# Single result, return it!
print "Automatically selecting only result"
return allSeries[0]
if self.config['select_first'] is True:
print "Automatically returning first search result"
return allSeries[0]
while True: # return breaks this loop
try:
print "Enter choice (first number, return for default, 'all', ? for help):"
ans = raw_input()
except KeyboardInterrupt:
raise tvdb_userabort("User aborted (^c keyboard interupt)")
except EOFError:
raise tvdb_userabort("User aborted (EOF received)")
log().debug('Got choice of: %s' % (ans))
try:
selected_id = int(ans) - 1 # The human entered 1 as first result, not zero
except ValueError: # Input was not number
if len(ans.strip()) == 0:
# Default option
log().debug('Default option, returning first series')
return allSeries[0]
if ans == "q":
log().debug('Got quit command (q)')
raise tvdb_userabort("User aborted ('q' quit command)")
elif ans == "?":
print "## Help"
print "# Enter the number that corresponds to the correct show."
print "# a - display all results"
print "# all - display all results"
print "# ? - this help"
print "# q - abort tvnamer"
print "# Press return with no input to select first result"
elif ans.lower() in ["a", "all"]:
self._displaySeries(allSeries, limit = None)
else:
log().debug('Unknown keypress %s' % (ans))
else:
log().debug('Trying to return ID: %d' % (selected_id))
try:
return allSeries[selected_id]
except IndexError:
log().debug('Invalid show number entered!')
print "Invalid number (%s) selected!"
self._displaySeries(allSeries)
#end try
#end while not valid_input
|
solashirai/edx-platform
|
refs/heads/master
|
lms/djangoapps/teams/management/commands/tests/test_reindex_course_team.py
|
33
|
""" Tests for course_team reindex command """
import ddt
import mock
from mock import patch
from django.core.management import call_command, CommandError
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from opaque_keys.edx.keys import CourseKey
from ....tests.factories import CourseTeamFactory
from ....search_indexes import CourseTeamIndexer
from search.search_engine_base import SearchEngine
COURSE_KEY1 = CourseKey.from_string('edx/history/1')
@ddt.ddt
class ReindexCourseTeamTest(SharedModuleStoreTestCase):
"""Tests for the ReindexCourseTeam command"""
def setUp(self):
"""
Set up tests.
"""
super(ReindexCourseTeamTest, self).setUp()
self.team1 = CourseTeamFactory(course_id=COURSE_KEY1, team_id='team1')
self.team2 = CourseTeamFactory(course_id=COURSE_KEY1, team_id='team2')
self.team3 = CourseTeamFactory(course_id=COURSE_KEY1, team_id='team3')
self.search_engine = SearchEngine.get_search_engine(index='index_course_team')
def test_given_no_arguments_raises_command_error(self):
""" Test that raises CommandError for incorrect arguments. """
with self.assertRaisesRegexp(CommandError, ".* requires one or more arguments.*"):
call_command('reindex_course_team')
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_TEAMS': False})
def test_teams_search_flag_disabled_raises_command_error(self):
""" Test that raises CommandError for disabled feature flag. """
with self.assertRaisesRegexp(CommandError, ".*ENABLE_TEAMS must be enabled.*"):
call_command('reindex_course_team', self.team1.team_id)
def test_given_invalid_team_id_raises_command_error(self):
""" Test that raises CommandError for invalid team id. """
team_id = u'team4'
error_str = 'Argument {0} is not a course_team team_id'.format(team_id)
with self.assertRaisesRegexp(CommandError, error_str):
call_command('reindex_course_team', team_id)
@patch.object(CourseTeamIndexer, 'index')
def test_single_team_id(self, mock_index):
""" Test that command indexes a single passed team. """
call_command('reindex_course_team', self.team1.team_id)
mock_index.assert_called_once_with(self.team1)
mock_index.reset_mock()
@patch.object(CourseTeamIndexer, 'index')
def test_multiple_team_id(self, mock_index):
""" Test that command indexes multiple passed teams. """
call_command('reindex_course_team', self.team1.team_id, self.team2.team_id)
mock_index.assert_any_call(self.team1)
mock_index.assert_any_call(self.team2)
mock_index.reset_mock()
@patch.object(CourseTeamIndexer, 'index')
def test_all_teams(self, mock_index):
""" Test that command indexes all teams. """
call_command('reindex_course_team', all=True)
mock_index.assert_any_call(self.team1)
mock_index.assert_any_call(self.team2)
mock_index.assert_any_call(self.team3)
mock_index.reset_mock()
|
SivagnanamCiena/mock-s3
|
refs/heads/master
|
tests/create.py
|
1
|
#!/usr/bin/env python
import boto
from boto.s3.key import Key
OrdinaryCallingFormat = boto.config.get('s3', 'calling_format', 'boto.s3.connection.OrdinaryCallingFormat')
s3 = boto.connect_s3(host='localhost', port=10001, calling_format=OrdinaryCallingFormat, is_secure=False)
b = s3.create_bucket('mocking')
kwrite = Key(b)
kwrite.key = 'hello.html'
kwrite.set_contents_from_string('this is some really cool html')
kread = Key(b)
kread.key = 'hello.html'
content = kread.get_contents_as_string()
print content
|
pdufour/sqlalchemy
|
refs/heads/master
|
test/orm/test_rel_fn.py
|
27
|
from sqlalchemy.testing import assert_raises_message, eq_, \
AssertsCompiledSQL, is_
from sqlalchemy.testing import fixtures
from sqlalchemy.orm import relationships, foreign, remote
from sqlalchemy import MetaData, Table, Column, ForeignKey, Integer, \
select, ForeignKeyConstraint, exc, func, and_, String, Boolean
from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE, MANYTOMANY
from sqlalchemy.testing import mock
class _JoinFixtures(object):
@classmethod
def setup_class(cls):
m = MetaData()
cls.left = Table('lft', m,
Column('id', Integer, primary_key=True),
Column('x', Integer),
Column('y', Integer),
)
cls.right = Table('rgt', m,
Column('id', Integer, primary_key=True),
Column('lid', Integer, ForeignKey('lft.id')),
Column('x', Integer),
Column('y', Integer),
)
cls.right_multi_fk = Table('rgt_multi_fk', m,
Column('id', Integer, primary_key=True),
Column('lid1', Integer, ForeignKey('lft.id')),
Column('lid2', Integer, ForeignKey('lft.id')),
)
cls.selfref = Table('selfref', m,
Column('id', Integer, primary_key=True),
Column('sid', Integer, ForeignKey('selfref.id'))
)
cls.composite_selfref = Table('composite_selfref', m,
Column('id', Integer, primary_key=True),
Column('group_id', Integer, primary_key=True),
Column('parent_id', Integer),
ForeignKeyConstraint(
['parent_id', 'group_id'],
['composite_selfref.id', 'composite_selfref.group_id']
)
)
cls.m2mleft = Table('m2mlft', m,
Column('id', Integer, primary_key=True),
)
cls.m2mright = Table('m2mrgt', m,
Column('id', Integer, primary_key=True),
)
cls.m2msecondary = Table('m2msecondary', m,
Column('lid', Integer, ForeignKey('m2mlft.id'), primary_key=True),
Column('rid', Integer, ForeignKey('m2mrgt.id'), primary_key=True),
)
cls.m2msecondary_no_fks = Table('m2msecondary_no_fks', m,
Column('lid', Integer, primary_key=True),
Column('rid', Integer, primary_key=True),
)
cls.m2msecondary_ambig_fks = Table('m2msecondary_ambig_fks', m,
Column('lid1', Integer, ForeignKey('m2mlft.id'), primary_key=True),
Column('rid1', Integer, ForeignKey('m2mrgt.id'), primary_key=True),
Column('lid2', Integer, ForeignKey('m2mlft.id'), primary_key=True),
Column('rid2', Integer, ForeignKey('m2mrgt.id'), primary_key=True),
)
cls.base_w_sub_rel = Table('base_w_sub_rel', m,
Column('id', Integer, primary_key=True),
Column('sub_id', Integer, ForeignKey('rel_sub.id'))
)
cls.rel_sub = Table('rel_sub', m,
Column('id', Integer, ForeignKey('base_w_sub_rel.id'),
primary_key=True)
)
cls.base = Table('base', m,
Column('id', Integer, primary_key=True),
Column('flag', Boolean)
)
cls.sub = Table('sub', m,
Column('id', Integer, ForeignKey('base.id'),
primary_key=True),
)
cls.sub_w_base_rel = Table('sub_w_base_rel', m,
Column('id', Integer, ForeignKey('base.id'),
primary_key=True),
Column('base_id', Integer, ForeignKey('base.id'))
)
cls.sub_w_sub_rel = Table('sub_w_sub_rel', m,
Column('id', Integer, ForeignKey('base.id'),
primary_key=True),
Column('sub_id', Integer, ForeignKey('sub.id'))
)
cls.right_w_base_rel = Table('right_w_base_rel', m,
Column('id', Integer, primary_key=True),
Column('base_id', Integer, ForeignKey('base.id'))
)
cls.three_tab_a = Table('three_tab_a', m,
Column('id', Integer, primary_key=True),
)
cls.three_tab_b = Table('three_tab_b', m,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('three_tab_a.id'))
)
cls.three_tab_c = Table('three_tab_c', m,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('three_tab_a.id')),
Column('bid', Integer, ForeignKey('three_tab_b.id'))
)
cls.composite_target = Table('composite_target', m,
Column('uid', Integer, primary_key=True),
Column('oid', Integer, primary_key=True),
)
cls.composite_multi_ref = Table('composite_multi_ref', m,
Column('uid1', Integer),
Column('uid2', Integer),
Column('oid', Integer),
ForeignKeyConstraint(("uid1", "oid"),
("composite_target.uid", "composite_target.oid")),
ForeignKeyConstraint(("uid2", "oid"),
("composite_target.uid", "composite_target.oid")),
)
cls.purely_single_col = Table('purely_single_col', m,
Column('path', String)
)
def _join_fixture_overlapping_three_tables(self, **kw):
def _can_sync(*cols):
for c in cols:
if self.three_tab_c.c.contains_column(c):
return False
else:
return True
return relationships.JoinCondition(
self.three_tab_a,
self.three_tab_b,
self.three_tab_a,
self.three_tab_b,
support_sync=False,
can_be_synced_fn=_can_sync,
primaryjoin=and_(
self.three_tab_a.c.id == self.three_tab_b.c.aid,
self.three_tab_c.c.bid == self.three_tab_b.c.id,
self.three_tab_c.c.aid == self.three_tab_a.c.id
)
)
def _join_fixture_m2m(self, **kw):
return relationships.JoinCondition(
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary,
**kw
)
def _join_fixture_m2m_backref(self, **kw):
"""return JoinCondition in the same way RelationshipProperty
calls it for a backref on an m2m.
"""
j1 = self._join_fixture_m2m()
return j1, relationships.JoinCondition(
self.m2mright,
self.m2mleft,
self.m2mright,
self.m2mleft,
secondary=self.m2msecondary,
primaryjoin=j1.secondaryjoin_minus_local,
secondaryjoin=j1.primaryjoin_minus_local
)
def _join_fixture_o2m(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
**kw
)
def _join_fixture_m2o(self, **kw):
return relationships.JoinCondition(
self.right,
self.left,
self.right,
self.left,
**kw
)
def _join_fixture_o2m_selfref(self, **kw):
return relationships.JoinCondition(
self.selfref,
self.selfref,
self.selfref,
self.selfref,
**kw
)
def _join_fixture_m2o_selfref(self, **kw):
return relationships.JoinCondition(
self.selfref,
self.selfref,
self.selfref,
self.selfref,
remote_side=set([self.selfref.c.id]),
**kw
)
def _join_fixture_o2m_composite_selfref(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
**kw
)
def _join_fixture_m2o_composite_selfref(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
remote_side=set([self.composite_selfref.c.id,
self.composite_selfref.c.group_id]),
**kw
)
def _join_fixture_o2m_composite_selfref_func(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
self.composite_selfref.c.group_id ==
func.foo(self.composite_selfref.c.group_id),
self.composite_selfref.c.parent_id ==
self.composite_selfref.c.id
),
**kw
)
def _join_fixture_o2m_composite_selfref_func_remote_side(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
self.composite_selfref.c.group_id ==
func.foo(self.composite_selfref.c.group_id),
self.composite_selfref.c.parent_id ==
self.composite_selfref.c.id
),
remote_side=set([self.composite_selfref.c.parent_id]),
**kw
)
def _join_fixture_o2m_composite_selfref_func_annotated(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
remote(self.composite_selfref.c.group_id) ==
func.foo(self.composite_selfref.c.group_id),
remote(self.composite_selfref.c.parent_id) ==
self.composite_selfref.c.id
),
**kw
)
def _join_fixture_compound_expression_1(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=(self.left.c.x + self.left.c.y) == \
relationships.remote(relationships.foreign(
self.right.c.x * self.right.c.y
)),
**kw
)
def _join_fixture_compound_expression_2(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=(self.left.c.x + self.left.c.y) == \
relationships.foreign(
self.right.c.x * self.right.c.y
),
**kw
)
def _join_fixture_compound_expression_1_non_annotated(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=(self.left.c.x + self.left.c.y) == \
(
self.right.c.x * self.right.c.y
),
**kw
)
def _join_fixture_base_to_joined_sub(self, **kw):
# see test/orm/inheritance/test_abc_inheritance:TestaTobM2O
# and others there
right = self.base_w_sub_rel.join(self.rel_sub,
self.base_w_sub_rel.c.id == self.rel_sub.c.id
)
return relationships.JoinCondition(
self.base_w_sub_rel,
right,
self.base_w_sub_rel,
self.rel_sub,
primaryjoin=self.base_w_sub_rel.c.sub_id == \
self.rel_sub.c.id,
**kw
)
def _join_fixture_o2m_joined_sub_to_base(self, **kw):
left = self.base.join(self.sub_w_base_rel,
self.base.c.id == self.sub_w_base_rel.c.id)
return relationships.JoinCondition(
left,
self.base,
self.sub_w_base_rel,
self.base,
primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id
)
def _join_fixture_m2o_joined_sub_to_sub_on_base(self, **kw):
# this is a late add - a variant of the test case
# in #2491 where we join on the base cols instead. only
# m2o has a problem at the time of this test.
left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
right = self.base.join(self.sub_w_base_rel,
self.base.c.id == self.sub_w_base_rel.c.id)
return relationships.JoinCondition(
left,
right,
self.sub,
self.sub_w_base_rel,
primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id,
)
def _join_fixture_o2m_joined_sub_to_sub(self, **kw):
left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
right = self.base.join(self.sub_w_sub_rel,
self.base.c.id == self.sub_w_sub_rel.c.id)
return relationships.JoinCondition(
left,
right,
self.sub,
self.sub_w_sub_rel,
primaryjoin=self.sub.c.id == self.sub_w_sub_rel.c.sub_id
)
def _join_fixture_m2o_sub_to_joined_sub(self, **kw):
# see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
right = self.base.join(self.right_w_base_rel,
self.base.c.id == self.right_w_base_rel.c.id)
return relationships.JoinCondition(
self.right_w_base_rel,
right,
self.right_w_base_rel,
self.right_w_base_rel,
)
def _join_fixture_m2o_sub_to_joined_sub_func(self, **kw):
# see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
right = self.base.join(self.right_w_base_rel,
self.base.c.id == self.right_w_base_rel.c.id)
return relationships.JoinCondition(
self.right_w_base_rel,
right,
self.right_w_base_rel,
self.right_w_base_rel,
primaryjoin=self.right_w_base_rel.c.base_id == \
func.foo(self.base.c.id)
)
def _join_fixture_o2o_joined_sub_to_base(self, **kw):
left = self.base.join(self.sub,
self.base.c.id == self.sub.c.id)
# see test_relationships->AmbiguousJoinInterpretedAsSelfRef
return relationships.JoinCondition(
left,
self.sub,
left,
self.sub,
)
def _join_fixture_o2m_to_annotated_func(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=self.left.c.id ==
foreign(func.foo(self.right.c.lid)),
**kw
)
def _join_fixture_o2m_to_oldstyle_func(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=self.left.c.id ==
func.foo(self.right.c.lid),
consider_as_foreign_keys=[self.right.c.lid],
**kw
)
def _join_fixture_overlapping_composite_fks(self, **kw):
return relationships.JoinCondition(
self.composite_target,
self.composite_multi_ref,
self.composite_target,
self.composite_multi_ref,
consider_as_foreign_keys=[self.composite_multi_ref.c.uid2,
self.composite_multi_ref.c.oid],
**kw
)
def _join_fixture_o2m_o_side_none(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=and_(self.left.c.id == self.right.c.lid,
self.left.c.x == 5),
**kw
)
def _join_fixture_purely_single_o2m(self, **kw):
return relationships.JoinCondition(
self.purely_single_col,
self.purely_single_col,
self.purely_single_col,
self.purely_single_col,
support_sync=False,
primaryjoin=
self.purely_single_col.c.path.like(
remote(
foreign(
self.purely_single_col.c.path.concat('%')
)
)
)
)
def _join_fixture_purely_single_m2o(self, **kw):
return relationships.JoinCondition(
self.purely_single_col,
self.purely_single_col,
self.purely_single_col,
self.purely_single_col,
support_sync=False,
primaryjoin=
remote(self.purely_single_col.c.path).like(
foreign(self.purely_single_col.c.path.concat('%'))
)
)
def _join_fixture_remote_local_multiple_ref(self, **kw):
fn = lambda a, b: ((a == b) | (b == a))
return relationships.JoinCondition(
self.selfref, self.selfref,
self.selfref, self.selfref,
support_sync=False,
primaryjoin=fn(
# we're putting a do-nothing annotation on
# "a" so that the left/right is preserved;
# annotation vs. non seems to affect __eq__ behavior
self.selfref.c.sid._annotate({"foo": "bar"}),
foreign(remote(self.selfref.c.sid)))
)
def _join_fixture_inh_selfref_w_entity(self, **kw):
fake_logger = mock.Mock(info=lambda *arg, **kw: None)
prop = mock.Mock(
parent=mock.Mock(),
mapper=mock.Mock(),
logger=fake_logger
)
local_selectable = self.base.join(self.sub)
remote_selectable = self.base.join(self.sub_w_sub_rel)
sub_w_sub_rel__sub_id = self.sub_w_sub_rel.c.sub_id._annotate(
{'parentmapper': prop.mapper})
sub__id = self.sub.c.id._annotate({'parentmapper': prop.parent})
sub_w_sub_rel__flag = self.base.c.flag._annotate(
{"parentmapper": prop.mapper})
return relationships.JoinCondition(
local_selectable, remote_selectable,
local_selectable, remote_selectable,
primaryjoin=and_(
sub_w_sub_rel__sub_id == sub__id,
sub_w_sub_rel__flag == True
),
prop=prop
)
def _assert_non_simple_warning(self, fn):
assert_raises_message(
exc.SAWarning,
"Non-simple column elements in "
"primary join condition for property "
r"None - consider using remote\(\) "
"annotations to mark the remote side.",
fn
)
def _assert_raises_no_relevant_fks(self, fn, expr, relname,
primary, *arg, **kw):
assert_raises_message(
exc.ArgumentError,
r"Could not locate any relevant foreign key columns "
r"for %s join condition '%s' on relationship %s. "
r"Ensure that referencing columns are associated with "
r"a ForeignKey or ForeignKeyConstraint, or are annotated "
r"in the join condition with the foreign\(\) annotation."
% (
primary, expr, relname
),
fn, *arg, **kw
)
def _assert_raises_no_equality(self, fn, expr, relname,
primary, *arg, **kw):
assert_raises_message(
exc.ArgumentError,
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for %s join "
"condition '%s' on relationship %s. "
"Ensure that referencing columns are associated with a "
"ForeignKey or ForeignKeyConstraint, or are annotated in "
r"the join condition with the foreign\(\) annotation. "
"To allow comparison operators other than '==', "
"the relationship can be marked as viewonly=True." % (
primary, expr, relname
),
fn, *arg, **kw
)
def _assert_raises_ambig_join(self, fn, relname, secondary_arg,
*arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are multiple foreign key paths linking the "
"tables via secondary table '%s'. "
"Specify the 'foreign_keys' argument, providing a list "
"of those columns which should be counted as "
"containing a foreign key reference from the "
"secondary table to each of the parent and child tables."
% (relname, secondary_arg),
fn, *arg, **kw)
else:
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables. "
% (relname,),
fn, *arg, **kw)
def _assert_raises_no_join(self, fn, relname, secondary_arg,
*arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.NoForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables "
"via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey "
"or ForeignKeyConstraint, or specify 'primaryjoin' and "
"'secondaryjoin' expressions"
% (relname, secondary_arg),
fn, *arg, **kw)
else:
assert_raises_message(
exc.NoForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey "
"or ForeignKeyConstraint, or specify a 'primaryjoin' "
"expression."
% (relname,),
fn, *arg, **kw)
class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase,
AssertsCompiledSQL):
def test_determine_local_remote_pairs_o2o_joined_sub_to_base(self):
joincond = self._join_fixture_o2o_joined_sub_to_base()
eq_(
joincond.local_remote_pairs,
[(self.base.c.id, self.sub.c.id)]
)
def test_determine_synchronize_pairs_o2m_to_annotated_func(self):
joincond = self._join_fixture_o2m_to_annotated_func()
eq_(
joincond.synchronize_pairs,
[(self.left.c.id, self.right.c.lid)]
)
def test_determine_synchronize_pairs_o2m_to_oldstyle_func(self):
joincond = self._join_fixture_o2m_to_oldstyle_func()
eq_(
joincond.synchronize_pairs,
[(self.left.c.id, self.right.c.lid)]
)
def test_determinelocal_remote_m2o_joined_sub_to_sub_on_base(self):
joincond = self._join_fixture_m2o_joined_sub_to_sub_on_base()
eq_(
joincond.local_remote_pairs,
[(self.base.c.id, self.sub_w_base_rel.c.base_id)]
)
def test_determine_local_remote_base_to_joined_sub(self):
joincond = self._join_fixture_base_to_joined_sub()
eq_(
joincond.local_remote_pairs,
[
(self.base_w_sub_rel.c.sub_id, self.rel_sub.c.id)
]
)
def test_determine_local_remote_o2m_joined_sub_to_base(self):
joincond = self._join_fixture_o2m_joined_sub_to_base()
eq_(
joincond.local_remote_pairs,
[
(self.sub_w_base_rel.c.base_id, self.base.c.id)
]
)
def test_determine_local_remote_m2o_sub_to_joined_sub(self):
joincond = self._join_fixture_m2o_sub_to_joined_sub()
eq_(
joincond.local_remote_pairs,
[
(self.right_w_base_rel.c.base_id, self.base.c.id)
]
)
def test_determine_remote_columns_o2m_joined_sub_to_sub(self):
joincond = self._join_fixture_o2m_joined_sub_to_sub()
eq_(
joincond.local_remote_pairs,
[
(self.sub.c.id, self.sub_w_sub_rel.c.sub_id)
]
)
def test_determine_remote_columns_compound_1(self):
joincond = self._join_fixture_compound_expression_1(
support_sync=False)
eq_(
joincond.remote_columns,
set([self.right.c.x, self.right.c.y])
)
def test_determine_local_remote_compound_1(self):
joincond = self._join_fixture_compound_expression_1(
support_sync=False)
eq_(
joincond.local_remote_pairs,
[
(self.left.c.x, self.right.c.x),
(self.left.c.x, self.right.c.y),
(self.left.c.y, self.right.c.x),
(self.left.c.y, self.right.c.y)
]
)
def test_determine_local_remote_compound_2(self):
joincond = self._join_fixture_compound_expression_2(
support_sync=False)
eq_(
joincond.local_remote_pairs,
[
(self.left.c.x, self.right.c.x),
(self.left.c.x, self.right.c.y),
(self.left.c.y, self.right.c.x),
(self.left.c.y, self.right.c.y)
]
)
def test_determine_local_remote_compound_3(self):
joincond = self._join_fixture_compound_expression_1()
eq_(
joincond.local_remote_pairs,
[
(self.left.c.x, self.right.c.x),
(self.left.c.x, self.right.c.y),
(self.left.c.y, self.right.c.x),
(self.left.c.y, self.right.c.y),
]
)
def test_err_local_remote_compound_1(self):
self._assert_raises_no_relevant_fks(
self._join_fixture_compound_expression_1_non_annotated,
r'lft.x \+ lft.y = rgt.x \* rgt.y',
"None", "primary"
)
def test_determine_remote_columns_compound_2(self):
joincond = self._join_fixture_compound_expression_2(
support_sync=False)
eq_(
joincond.remote_columns,
set([self.right.c.x, self.right.c.y])
)
def test_determine_remote_columns_o2m(self):
joincond = self._join_fixture_o2m()
eq_(
joincond.remote_columns,
set([self.right.c.lid])
)
def test_determine_remote_columns_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
eq_(
joincond.remote_columns,
set([self.selfref.c.sid])
)
def test_determine_local_remote_pairs_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
eq_(
joincond.local_remote_pairs,
[
(self.composite_selfref.c.group_id,
self.composite_selfref.c.group_id),
(self.composite_selfref.c.id,
self.composite_selfref.c.parent_id),
]
)
def test_determine_local_remote_pairs_o2m_composite_selfref_func_warning(self):
self._assert_non_simple_warning(
self._join_fixture_o2m_composite_selfref_func
)
def test_determine_local_remote_pairs_o2m_composite_selfref_func_rs(self):
# no warning
self._join_fixture_o2m_composite_selfref_func_remote_side()
def test_determine_local_remote_pairs_o2m_overlap_func_warning(self):
self._assert_non_simple_warning(
self._join_fixture_m2o_sub_to_joined_sub_func
)
def test_determine_local_remote_pairs_o2m_composite_selfref_func_annotated(self):
joincond = self._join_fixture_o2m_composite_selfref_func_annotated()
eq_(
joincond.local_remote_pairs,
[
(self.composite_selfref.c.group_id,
self.composite_selfref.c.group_id),
(self.composite_selfref.c.id,
self.composite_selfref.c.parent_id),
]
)
def test_determine_remote_columns_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
eq_(
joincond.remote_columns,
set([self.composite_selfref.c.id,
self.composite_selfref.c.group_id])
)
def test_determine_remote_columns_m2o(self):
joincond = self._join_fixture_m2o()
eq_(
joincond.remote_columns,
set([self.left.c.id])
)
def test_determine_local_remote_pairs_o2m(self):
joincond = self._join_fixture_o2m()
eq_(
joincond.local_remote_pairs,
[(self.left.c.id, self.right.c.lid)]
)
def test_determine_synchronize_pairs_m2m(self):
joincond = self._join_fixture_m2m()
eq_(
joincond.synchronize_pairs,
[(self.m2mleft.c.id, self.m2msecondary.c.lid)]
)
eq_(
joincond.secondary_synchronize_pairs,
[(self.m2mright.c.id, self.m2msecondary.c.rid)]
)
def test_determine_local_remote_pairs_o2m_backref(self):
joincond = self._join_fixture_o2m()
joincond2 = self._join_fixture_m2o(
primaryjoin=joincond.primaryjoin_reverse_remote,
)
eq_(
joincond2.local_remote_pairs,
[(self.right.c.lid, self.left.c.id)]
)
def test_determine_local_remote_pairs_m2m(self):
joincond = self._join_fixture_m2m()
eq_(
joincond.local_remote_pairs,
[(self.m2mleft.c.id, self.m2msecondary.c.lid),
(self.m2mright.c.id, self.m2msecondary.c.rid)]
)
def test_determine_local_remote_pairs_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.local_remote_pairs,
[(self.m2mleft.c.id, self.m2msecondary.c.lid),
(self.m2mright.c.id, self.m2msecondary.c.rid)]
)
eq_(
j2.local_remote_pairs,
[
(self.m2mright.c.id, self.m2msecondary.c.rid),
(self.m2mleft.c.id, self.m2msecondary.c.lid),
]
)
def test_determine_local_columns_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.local_columns,
set([self.m2mleft.c.id])
)
eq_(
j2.local_columns,
set([self.m2mright.c.id])
)
def test_determine_remote_columns_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.remote_columns,
set([self.m2msecondary.c.lid, self.m2msecondary.c.rid])
)
eq_(
j2.remote_columns,
set([self.m2msecondary.c.lid, self.m2msecondary.c.rid])
)
def test_determine_remote_columns_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
eq_(
joincond.remote_columns,
set([self.selfref.c.id])
)
def test_determine_local_remote_cols_three_tab_viewonly(self):
joincond = self._join_fixture_overlapping_three_tables()
eq_(
joincond.local_remote_pairs,
[(self.three_tab_a.c.id, self.three_tab_b.c.aid)]
)
eq_(
joincond.remote_columns,
set([self.three_tab_b.c.id, self.three_tab_b.c.aid])
)
def test_determine_local_remote_overlapping_composite_fks(self):
joincond = self._join_fixture_overlapping_composite_fks()
eq_(
joincond.local_remote_pairs,
[
(self.composite_target.c.uid, self.composite_multi_ref.c.uid2,),
(self.composite_target.c.oid, self.composite_multi_ref.c.oid,)
]
)
def test_determine_local_remote_pairs_purely_single_col_o2m(self):
joincond = self._join_fixture_purely_single_o2m()
eq_(
joincond.local_remote_pairs,
[(self.purely_single_col.c.path, self.purely_single_col.c.path)]
)
def test_determine_local_remote_pairs_inh_selfref_w_entities(self):
joincond = self._join_fixture_inh_selfref_w_entity()
eq_(
joincond.local_remote_pairs,
[(self.sub.c.id, self.sub_w_sub_rel.c.sub_id)]
)
eq_(
joincond.remote_columns,
set([self.base.c.flag, self.sub_w_sub_rel.c.sub_id])
)
class DirectionTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
def test_determine_direction_compound_2(self):
joincond = self._join_fixture_compound_expression_2(
support_sync=False)
is_(
joincond.direction,
ONETOMANY
)
def test_determine_direction_o2m(self):
joincond = self._join_fixture_o2m()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
is_(joincond.direction, MANYTOONE)
def test_determine_direction_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
is_(joincond.direction, MANYTOONE)
def test_determine_direction_m2o(self):
joincond = self._join_fixture_m2o()
is_(joincond.direction, MANYTOONE)
def test_determine_direction_purely_single_o2m(self):
joincond = self._join_fixture_purely_single_o2m()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_purely_single_m2o(self):
joincond = self._join_fixture_purely_single_m2o()
is_(joincond.direction, MANYTOONE)
class DetermineJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_determine_join_o2m(self):
joincond = self._join_fixture_o2m()
self.assert_compile(
joincond.primaryjoin,
"lft.id = rgt.lid"
)
def test_determine_join_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
self.assert_compile(
joincond.primaryjoin,
"selfref.id = selfref.sid"
)
def test_determine_join_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
self.assert_compile(
joincond.primaryjoin,
"selfref.id = selfref.sid"
)
def test_determine_join_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
self.assert_compile(
joincond.primaryjoin,
"composite_selfref.group_id = composite_selfref.group_id "
"AND composite_selfref.id = composite_selfref.parent_id"
)
def test_determine_join_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
self.assert_compile(
joincond.primaryjoin,
"composite_selfref.group_id = composite_selfref.group_id "
"AND composite_selfref.id = composite_selfref.parent_id"
)
def test_determine_join_m2o(self):
joincond = self._join_fixture_m2o()
self.assert_compile(
joincond.primaryjoin,
"lft.id = rgt.lid"
)
def test_determine_join_ambiguous_fks_o2m(self):
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship None - "
"there are multiple foreign key paths linking "
"the tables. Specify the 'foreign_keys' argument, "
"providing a list of those columns which "
"should be counted as containing a foreign "
"key reference to the parent table.",
relationships.JoinCondition,
self.left,
self.right_multi_fk,
self.left,
self.right_multi_fk,
)
def test_determine_join_no_fks_o2m(self):
self._assert_raises_no_join(
relationships.JoinCondition,
"None", None,
self.left,
self.selfref,
self.left,
self.selfref,
)
def test_determine_join_ambiguous_fks_m2m(self):
self._assert_raises_ambig_join(
relationships.JoinCondition,
"None", self.m2msecondary_ambig_fks,
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary_ambig_fks
)
def test_determine_join_no_fks_m2m(self):
self._assert_raises_no_join(
relationships.JoinCondition,
"None", self.m2msecondary_no_fks,
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary_no_fks
)
def _join_fixture_fks_ambig_m2m(self):
return relationships.JoinCondition(
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary_ambig_fks,
consider_as_foreign_keys=[
self.m2msecondary_ambig_fks.c.lid1,
self.m2msecondary_ambig_fks.c.rid1]
)
def test_determine_join_w_fks_ambig_m2m(self):
joincond = self._join_fixture_fks_ambig_m2m()
self.assert_compile(
joincond.primaryjoin,
"m2mlft.id = m2msecondary_ambig_fks.lid1"
)
self.assert_compile(
joincond.secondaryjoin,
"m2mrgt.id = m2msecondary_ambig_fks.rid1"
)
class AdaptedJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_join_targets_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
left = select([joincond.parent_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
left,
joincond.child_selectable,
True)
self.assert_compile(
pj, "pj.id = selfref.sid"
)
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj, "selfref.id = pj.sid"
)
def test_join_targets_o2m_plain(self):
joincond = self._join_fixture_o2m()
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
joincond.child_selectable,
False)
self.assert_compile(
pj, "lft.id = rgt.lid"
)
def test_join_targets_o2m_left_aliased(self):
joincond = self._join_fixture_o2m()
left = select([joincond.parent_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
left,
joincond.child_selectable,
True)
self.assert_compile(
pj, "pj.id = rgt.lid"
)
def test_join_targets_o2m_right_aliased(self):
joincond = self._join_fixture_o2m()
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj, "lft.id = pj.lid"
)
def test_join_targets_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj,
"pj.group_id = composite_selfref.group_id "
"AND composite_selfref.id = pj.parent_id"
)
def test_join_targets_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj,
"pj.group_id = composite_selfref.group_id "
"AND pj.id = composite_selfref.parent_id"
)
class LazyClauseTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_lazy_clause_o2m(self):
joincond = self._join_fixture_o2m()
lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
self.assert_compile(
lazywhere,
":param_1 = rgt.lid"
)
def test_lazy_clause_o2m_reverse(self):
joincond = self._join_fixture_o2m()
lazywhere, bind_to_col, equated_columns =\
joincond.create_lazy_clause(reverse_direction=True)
self.assert_compile(
lazywhere,
"lft.id = :param_1"
)
def test_lazy_clause_o2m_o_side_none(self):
# test for #2948. When the join is "o.id == m.oid AND o.something == something",
# we don't want 'o' brought into the lazy load for 'm'
joincond = self._join_fixture_o2m_o_side_none()
lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
self.assert_compile(
lazywhere,
":param_1 = rgt.lid AND :param_2 = :x_1",
checkparams={'param_1': None, 'param_2': None, 'x_1': 5}
)
def test_lazy_clause_o2m_o_side_none_reverse(self):
# continued test for #2948.
joincond = self._join_fixture_o2m_o_side_none()
lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause(reverse_direction=True)
self.assert_compile(
lazywhere,
"lft.id = :param_1 AND lft.x = :x_1",
checkparams= {'param_1': None, 'x_1': 5}
)
def test_lazy_clause_remote_local_multiple_ref(self):
joincond = self._join_fixture_remote_local_multiple_ref()
lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
self.assert_compile(
lazywhere,
":param_1 = selfref.sid OR selfref.sid = :param_1",
checkparams={'param_1': None}
)
|
BoyGau/linux
|
refs/heads/master
|
tools/perf/scripts/python/export-to-postgresql.py
|
617
|
# export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
import datetime
from PySide.QtSql import *
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQfinish = libpq.PQfinish
PQstatus = libpq.PQstatus
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQresultStatus = libpq.PQresultStatus
PQputCopyData = libpq.PQputCopyData
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
if (len(sys.argv) >= 4):
if (sys.argv[3] == "calls"):
perf_db_export_calls = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
print datetime.datetime.today(), "Creating database..."
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16))')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
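# The intermediate .bin files use PostgreSQL's binary COPY format: an 11-byte
# signature ("PGCOPY\n\377\r\n\0"), a 32-bit flags field and a 32-bit
# header-extension length (both zero here), then one record per tuple -- a
# 16-bit field count followed by a 4-byte length and the raw value for each
# field -- and a 16-bit -1 ("\377\377") as the file trailer.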
file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = "\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "w+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
conn = PQconnectdb("dbname = " + dbname)
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, sql)
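# A result status of 4 corresponds to PGRES_COPY_IN in libpq's ExecStatusType
# enum, i.e. the server is now expecting the COPY data stream.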
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls:
call_path_file = open_output_file("call_path_table.bin")
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
def trace_end():
print datetime.datetime.today(), "Copying to database..."
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls:
copy_output_file(call_path_file, "call_paths")
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls:
remove_output_file(call_path_file)
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
print datetime.datetime.today(), "Done"
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
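# Each sample row follows the per-tuple layout noted above: a 16-bit field
# count (17 columns in 'branches' mode, 21 otherwise, matching the two CREATE
# TABLE samples variants), then a 4-byte length and the value for every field
# (8 bytes for bigint, 4 for integer, 1 for boolean).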
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
|
camradal/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_sys_global.py
|
32
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: bigip_sys_global
short_description: Manage BIG-IP global settings.
description:
- Manage BIG-IP global settings.
version_added: "2.3"
options:
banner_text:
description:
- Specifies the text to present in the advisory banner.
console_timeout:
description:
- Specifies the number of seconds of inactivity before the system logs
off a user that is logged on.
gui_setup:
description:
- C(enabled) or C(disabled) the Setup utility in the browser-based
Configuration utility.
choices:
- enabled
- disabled
lcd_display:
description:
- Specifies, when C(enabled), that the system menu displays on the
LCD screen on the front of the unit. This setting has no effect
when used on the VE platform.
choices:
- enabled
- disabled
mgmt_dhcp:
description:
- Specifies whether or not to enable the DHCP client on the management
interface.
choices:
- enabled
- disabled
net_reboot:
description:
- Specifies, when C(enabled), that the next time you reboot the system,
the system boots to an ISO image on the network, rather than an
internal media drive.
choices:
- enabled
- disabled
quiet_boot:
description:
- Specifies, when C(enabled), that the system suppresses informational
text on the console during the boot cycle. When C(disabled), the
system presents messages and informational text on the console during
the boot cycle.
security_banner:
description:
- Specifies whether the system displays an advisory message on the
login screen.
choices:
- enabled
- disabled
state:
description:
- The state of the variable on the system. When C(present), guarantees
that an existing variable is set to C(value).
required: false
default: present
choices:
- present
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Disable the setup utility
bigip_sys_global:
gui_setup: "disabled"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
state: "present"
delegate_to: localhost
'''
RETURN = '''
banner_text:
description: The new text to present in the advisory banner.
returned: changed
type: string
sample: "This is a corporate device. Do not touch."
console_timeout:
description: >
The new number of seconds of inactivity before the system
logs off a user that is logged on.
returned: changed
type: integer
sample: 600
gui_setup:
description: The new setting for the Setup utility.
returned: changed
type: string
sample: enabled
lcd_display:
description: The new setting for displaying the system menu on the LCD.
returned: changed
type: string
sample: enabled
mgmt_dhcp:
description: >
    The new setting for whether the mgmt interface should use DHCP
    or not.
returned: changed
type: string
sample: enabled
net_reboot:
description: >
The new setting for whether the system should boot to an ISO on the
    network or not.
returned: changed
type: string
sample: enabled
quiet_boot:
description: >
The new setting for whether the system should suppress information to
the console during boot or not.
returned: changed
type: string
sample: enabled
security_banner:
description: >
The new setting for whether the system should display an advisory message
    on the login screen or not.
returned: changed
type: string
sample: enabled
'''
try:
from f5.bigip.contexts import TransactionContextManager
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
class BigIpSysGlobalManager(object):
def __init__(self, *args, **kwargs):
self.changed_params = dict()
self.params = kwargs
self.api = None
def apply_changes(self):
result = dict()
changed = self.apply_to_running_config()
result.update(**self.changed_params)
result.update(dict(changed=changed))
return result
def apply_to_running_config(self):
try:
self.api = self.connect_to_bigip(**self.params)
return self.update_sys_global_settings()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
def connect_to_bigip(self, **kwargs):
return ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def read_sys_global_information(self):
settings = self.load_sys_global()
return self.format_sys_global_information(settings)
def load_sys_global(self):
return self.api.tm.sys.global_settings.load()
def get_changed_parameters(self):
result = dict()
current = self.read_sys_global_information()
if self.security_banner_is_changed(current):
result['guiSecurityBanner'] = self.params['security_banner']
if self.banner_text_is_changed(current):
result['guiSecurityBannerText'] = self.params['banner_text']
if self.gui_setup_is_changed(current):
result['guiSetup'] = self.params['gui_setup']
if self.lcd_display_is_changed(current):
result['lcdDisplay'] = self.params['lcd_display']
if self.mgmt_dhcp_is_changed(current):
result['mgmtDhcp'] = self.params['mgmt_dhcp']
if self.net_reboot_is_changed(current):
result['netReboot'] = self.params['net_reboot']
if self.quiet_boot_is_changed(current):
result['quietBoot'] = self.params['quiet_boot']
if self.console_timeout_is_changed(current):
result['consoleInactivityTimeout'] = self.params['console_timeout']
return result
def security_banner_is_changed(self, current):
if self.params['security_banner'] is None:
return False
if 'security_banner' not in current:
return True
if self.params['security_banner'] == current['security_banner']:
return False
else:
return True
def banner_text_is_changed(self, current):
if self.params['banner_text'] is None:
return False
if 'banner_text' not in current:
return True
if self.params['banner_text'] == current['banner_text']:
return False
else:
return True
def gui_setup_is_changed(self, current):
if self.params['gui_setup'] is None:
return False
if 'gui_setup' not in current:
return True
if self.params['gui_setup'] == current['gui_setup']:
return False
else:
return True
def lcd_display_is_changed(self, current):
if self.params['lcd_display'] is None:
return False
if 'lcd_display' not in current:
return True
if self.params['lcd_display'] == current['lcd_display']:
return False
else:
return True
def mgmt_dhcp_is_changed(self, current):
if self.params['mgmt_dhcp'] is None:
return False
if 'mgmt_dhcp' not in current:
return True
if self.params['mgmt_dhcp'] == current['mgmt_dhcp']:
return False
else:
return True
def net_reboot_is_changed(self, current):
if self.params['net_reboot'] is None:
return False
if 'net_reboot' not in current:
return True
if self.params['net_reboot'] == current['net_reboot']:
return False
else:
return True
def quiet_boot_is_changed(self, current):
if self.params['quiet_boot'] is None:
return False
if 'quiet_boot' not in current:
return True
if self.params['quiet_boot'] == current['quiet_boot']:
return False
else:
return True
def console_timeout_is_changed(self, current):
if self.params['console_timeout'] is None:
return False
if 'console_timeout' not in current:
return True
if self.params['console_timeout'] == current['console_timeout']:
return False
else:
return True
def format_sys_global_information(self, settings):
result = dict()
if hasattr(settings, 'guiSecurityBanner'):
result['security_banner'] = str(settings.guiSecurityBanner)
if hasattr(settings, 'guiSecurityBannerText'):
result['banner_text'] = str(settings.guiSecurityBannerText)
if hasattr(settings, 'guiSetup'):
result['gui_setup'] = str(settings.guiSetup)
if hasattr(settings, 'lcdDisplay'):
result['lcd_display'] = str(settings.lcdDisplay)
if hasattr(settings, 'mgmtDhcp'):
result['mgmt_dhcp'] = str(settings.mgmtDhcp)
if hasattr(settings, 'netReboot'):
result['net_reboot'] = str(settings.netReboot)
if hasattr(settings, 'quietBoot'):
result['quiet_boot'] = str(settings.quietBoot)
if hasattr(settings, 'consoleInactivityTimeout'):
result['console_timeout'] = int(settings.consoleInactivityTimeout)
return result
def update_sys_global_settings(self):
params = self.get_changed_parameters()
if params:
self.changed_params = camel_dict_to_snake_dict(params)
if self.params['check_mode']:
return True
else:
return False
self.update_sys_global_settings_on_device(params)
return True
def update_sys_global_settings_on_device(self, params):
tx = self.api.tm.transactions.transaction
with TransactionContextManager(tx) as api:
r = api.tm.sys.global_settings.load()
r.update(**params)
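# Editor's sketch (illustrative, not part of the original module): the changed
# parameters above use iControl REST camelCase keys; camel_dict_to_snake_dict()
# (imported near the bottom of this file) converts them to the snake_case keys
# reported back to Ansible, e.g.:
#
#   camel_dict_to_snake_dict({'guiSetup': 'disabled', 'consoleInactivityTimeout': 600})
#   # -> {'gui_setup': 'disabled', 'console_inactivity_timeout': 600}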
class BigIpSysGlobalModuleConfig(object):
def __init__(self):
self.argument_spec = dict()
self.meta_args = dict()
self.supports_check_mode = True
self.states = ['present']
self.on_off_choices = ['enabled', 'disabled']
self.initialize_meta_args()
self.initialize_argument_spec()
def initialize_meta_args(self):
args = dict(
security_banner=dict(
required=False,
choices=self.on_off_choices,
default=None
),
banner_text=dict(required=False, default=None),
gui_setup=dict(
required=False,
choices=self.on_off_choices,
default=None
),
lcd_display=dict(
required=False,
choices=self.on_off_choices,
default=None
),
mgmt_dhcp=dict(
required=False,
choices=self.on_off_choices,
default=None
),
net_reboot=dict(
required=False,
choices=self.on_off_choices,
default=None
),
quiet_boot=dict(
required=False,
choices=self.on_off_choices,
default=None
),
console_timeout=dict(required=False, type='int', default=None),
state=dict(default='present', choices=['present'])
)
self.meta_args = args
def initialize_argument_spec(self):
self.argument_spec = f5_argument_spec()
self.argument_spec.update(self.meta_args)
def create(self):
return AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=self.supports_check_mode
)
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
config = BigIpSysGlobalModuleConfig()
module = config.create()
try:
obj = BigIpSysGlobalManager(
check_mode=module.check_mode, **module.params
)
result = obj.apply_changes()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
|
techdragon/Js2Py
|
refs/heads/master
|
js2py/legecy_translators/__init__.py
|
135
|
__author__ = 'Piotrek'
|
ConnorGBrewster/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/py/doc/conf.py
|
218
|
# -*- coding: utf-8 -*-
#
# py documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 21 08:30:10 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py'
copyright = u'2010, holger krekel et. al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
import py
release = py.__version__
version = ".".join(release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'py'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'py.tex', u'py Documentation',
u'holger krekel et. al.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'py', u'py Documentation',
[u'holger krekel et. al.'], 1)
]
autodoc_member_order = "bysource"
autodoc_default_flags = "inherited-members"
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'py'
epub_author = u'holger krekel et. al.'
epub_publisher = u'holger krekel et. al.'
epub_copyright = u'2010, holger krekel et. al.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
hyperized/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/docker/docker_host_info.py
|
10
|
#!/usr/bin/python
#
# (c) 2019 Piotr Wojciechowski <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_host_info
short_description: Retrieves facts about the docker host and lists of objects such as containers, images, networks and volumes.
description:
- Retrieves facts about a docker host.
- Essentially returns the output of C(docker system info).
  - The module also allows listing object names for containers, images, networks and volumes.
    It also allows querying information on disk usage.
- The output differs depending on API version of the docker daemon.
- If the docker daemon cannot be contacted or does not meet the API version requirements,
the module will fail.
version_added: "2.8"
options:
containers:
description:
- Whether to list containers.
type: bool
default: no
containers_filters:
description:
      - A dictionary of filter values used for selecting containers to list.
- "For example, C(until: 24h)."
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
for more information on possible filters.
type: dict
images:
description:
- Whether to list images.
type: bool
default: no
images_filters:
description:
      - A dictionary of filter values used for selecting images to list.
- "For example, C(dangling: true)."
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
for more information on possible filters.
type: dict
networks:
description:
- Whether to list networks.
type: bool
default: no
networks_filters:
description:
      - A dictionary of filter values used for selecting networks to list.
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
for more information on possible filters.
type: dict
volumes:
description:
- Whether to list volumes.
type: bool
default: no
volumes_filters:
description:
      - A dictionary of filter values used for selecting volumes to list.
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
for more information on possible filters.
type: dict
disk_usage:
description:
- Summary information on used disk space by all Docker layers.
- The output is a sum of images, volumes, containers and build cache.
type: bool
default: no
verbose_output:
description:
      - When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes),
        the output will contain verbose information about the objects, matching the full output of the API method.
        For details see the documentation of your version of the Docker API at L(https://docs.docker.com/engine/api/).
      - The verbose output in this module contains only a subset of the information returned by the I(_info) module
        for each type of object.
type: bool
default: no
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
author:
- Piotr Wojciechowski (@WojciechowskiPiotr)
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "Docker API >= 1.21"
'''
EXAMPLES = '''
- name: Get info on docker host
docker_host_info:
register: result
- name: Get info on docker host and list images
docker_host_info:
images: yes
register: result
- name: Get info on docker host and list images matching the filter
docker_host_info:
images: yes
images_filters:
label: "mylabel"
register: result
- name: Get info on docker host and verbose list images
docker_host_info:
images: yes
verbose_output: yes
register: result
- name: Get info on docker host and used disk space
docker_host_info:
disk_usage: yes
register: result
- debug:
var: result.host_info
'''
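# Additional illustrative example (editor's sketch, not part of the original docs),
# assuming the docker daemon supports the C(status) container filter:
#
#   - name: Get info on docker host and list only running containers
#     docker_host_info:
#       containers: yes
#       containers_filters:
#         status: running
#     register: result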
RETURN = '''
can_talk_to_docker:
description:
- Will be C(true) if the module can talk to the docker daemon.
returned: both on success and on error
type: bool
host_info:
description:
- Facts representing the basic state of the docker host. Matches the C(docker system info) output.
returned: always
type: dict
volumes:
description:
- List of dict objects containing the basic information about each volume.
      Keys match the C(docker volume ls) output unless I(verbose_output=yes).
See description for I(verbose_output).
returned: When I(volumes) is C(yes)
type: list
networks:
description:
- List of dict objects containing the basic information about each network.
      Keys match the C(docker network ls) output unless I(verbose_output=yes).
See description for I(verbose_output).
returned: When I(networks) is C(yes)
type: list
containers:
description:
- List of dict objects containing the basic information about each container.
      Keys match the C(docker container ls) output unless I(verbose_output=yes).
See description for I(verbose_output).
returned: When I(containers) is C(yes)
type: list
images:
description:
- List of dict objects containing the basic information about each image.
      Keys match the C(docker image ls) output unless I(verbose_output=yes).
See description for I(verbose_output).
returned: When I(images) is C(yes)
type: list
disk_usage:
description:
- Information on summary disk usage by images, containers and volumes on docker host
unless I(verbose_output=yes). See description for I(verbose_output).
returned: When I(disk_usage) is C(yes)
type: dict
'''
import traceback
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
DockerBaseClass,
RequestException,
)
from ansible.module_utils._text import to_native
try:
from docker.errors import DockerException, APIError
except ImportError:
# Missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils.docker.common import clean_dict_booleans_for_docker_api
class DockerHostManager(DockerBaseClass):
def __init__(self, client, results):
super(DockerHostManager, self).__init__()
self.client = client
self.results = results
self.verbose_output = self.client.module.params['verbose_output']
listed_objects = ['volumes', 'networks', 'containers', 'images']
self.results['host_info'] = self.get_docker_host_info()
if self.client.module.params['disk_usage']:
self.results['disk_usage'] = self.get_docker_disk_usage_facts()
for docker_object in listed_objects:
if self.client.module.params[docker_object]:
returned_name = docker_object
filter_name = docker_object + "_filters"
filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
def get_docker_host_info(self):
try:
return self.client.info()
except APIError as exc:
self.client.fail("Error inspecting docker host: %s" % to_native(exc))
def get_docker_disk_usage_facts(self):
try:
if self.verbose_output:
return self.client.df()
else:
return dict(LayersSize=self.client.df()['LayersSize'])
except APIError as exc:
self.client.fail("Error inspecting docker host: %s" % to_native(exc))
def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
items = None
items_list = []
header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
header_volumes = ['Driver', 'Name']
header_images = ['Id', 'RepoTags', 'Created', 'Size']
header_networks = ['Id', 'Driver', 'Name', 'Scope']
filter_arg = dict()
if filters:
filter_arg['filters'] = filters
try:
if docker_object == 'containers':
items = self.client.containers(**filter_arg)
elif docker_object == 'networks':
items = self.client.networks(**filter_arg)
elif docker_object == 'images':
items = self.client.images(**filter_arg)
elif docker_object == 'volumes':
items = self.client.volumes(**filter_arg)
except APIError as exc:
self.client.fail("Error inspecting docker host for object '%s': %s" %
(docker_object, to_native(exc)))
if self.verbose_output:
if docker_object != 'volumes':
return items
else:
return items['Volumes']
if docker_object == 'volumes':
items = items['Volumes']
for item in items:
item_record = dict()
if docker_object == 'containers':
for key in header_containers:
item_record[key] = item.get(key)
elif docker_object == 'networks':
for key in header_networks:
item_record[key] = item.get(key)
elif docker_object == 'images':
for key in header_images:
item_record[key] = item.get(key)
elif docker_object == 'volumes':
for key in header_volumes:
item_record[key] = item.get(key)
items_list.append(item_record)
return items_list
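# Editor's note (illustrative, not part of the original module): with verbose_output
# disabled, each docker-py record is trimmed to the header_* keys defined above, so a
# container entry such as
#   {'Id': 'abc123', 'Image': 'nginx:latest', 'State': 'running', 'Names': ['/web'], ...}
# would be reduced to just its Id/Image/Command/Created/Status/Ports/Names fields.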
def main():
argument_spec = dict(
containers=dict(type='bool', default=False),
containers_filters=dict(type='dict'),
images=dict(type='bool', default=False),
images_filters=dict(type='dict'),
networks=dict(type='bool', default=False),
networks_filters=dict(type='dict'),
volumes=dict(type='bool', default=False),
volumes_filters=dict(type='dict'),
disk_usage=dict(type='bool', default=False),
verbose_output=dict(type='bool', default=False),
)
option_minimal_versions = dict(
        networks_filters=dict(docker_py_version='2.0.2'),
disk_usage=dict(docker_py_version='2.2.0'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
min_docker_version='1.10.0',
min_docker_api_version='1.21',
option_minimal_versions=option_minimal_versions,
fail_results=dict(
can_talk_to_docker=False,
),
)
client.fail_results['can_talk_to_docker'] = True
try:
results = dict(
changed=False,
)
DockerHostManager(client, results)
client.module.exit_json(**results)
except DockerException as e:
client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
except RequestException as e:
client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
Klaudit/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
|
124
|
# Copyright (C) 2010 Apple Inc. All rights reserved.
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Unit tests for watchlist.py.'''
import unittest2 as unittest
import watchlist
class MockErrorHandler(object):
def __init__(self, handle_style_error):
self.turned_off_filtering = False
self._handle_style_error = handle_style_error
def turn_off_line_filtering(self):
self.turned_off_filtering = True
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
return True
class WatchListTest(unittest.TestCase):
def test_basic_error_message(self):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
mock_error_handler.had_error = True
self.assertEqual(0, line_number)
self.assertEqual('watchlist/general', category)
error_handler = MockErrorHandler(handle_style_error)
error_handler.had_error = False
checker = watchlist.WatchListChecker('watchlist', error_handler)
checker.check(['{"DEFINTIONS": {}}'])
self.assertTrue(error_handler.had_error)
self.assertTrue(error_handler.turned_off_filtering)
|
umeier/pynmea
|
refs/heads/master
|
examples/simple_app.py
|
7
|
from pynmea.streamer import NMEAStream
data_file = '../tests/test_data/test_data.gps'
with open(data_file, 'r') as data_file_fd:
nmea_stream = NMEAStream(stream_obj=data_file_fd)
next_data = nmea_stream.get_objects()
nmea_objects = []
while next_data:
nmea_objects += next_data
next_data = nmea_stream.get_objects()
# All nmea objects are now in variable nmea_objects
for nmea_ob in nmea_objects:
    print(nmea_ob.sen_type)
|
pap/nupic
|
refs/heads/master
|
src/nupic/frameworks/opf/clamodel.py
|
19
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file clamodel.py
Encapsulation of a CLA network that implements the ModelBase.
"""
import copy
import math
import os
import json
import itertools
import logging
import traceback
from collections import deque
from operator import itemgetter
import numpy
from nupic.frameworks.opf.model import Model
from nupic.algorithms.anomaly import Anomaly
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaSpecial, FieldMetaInfo
from nupic.encoders import MultiEncoder, DeltaEncoder
from nupic.engine import Network
from nupic.support.fshelpers import makeDirectoryFromAbsolutePath
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement,
SensorInput,
ClassifierInput,
initLogger)
DEFAULT_LIKELIHOOD_THRESHOLD = 0.0001
DEFAULT_MAX_PREDICTIONS_PER_STEP = 8
DEFAULT_ANOMALY_TRAINRECORDS = 4000
DEFAULT_ANOMALY_THRESHOLD = 1.1
DEFAULT_ANOMALY_CACHESIZE = 10000
def requireAnomalyModel(func):
"""
Decorator for functions that require anomaly models.
"""
def _decorator(self, *args, **kwargs):
if not self.getInferenceType() == InferenceType.TemporalAnomaly:
raise RuntimeError("Method required a TemporalAnomaly model.")
if self._getAnomalyClassifier() is None:
raise RuntimeError("Model does not support this command. Model must"
"be an active anomalyDetector model.")
return func(self, *args, **kwargs)
return _decorator
class NetworkInfo(object):
""" Data type used as return value type by
CLAModel.__createCLANetwork()
"""
def __init__(self, net, statsCollectors):
"""
net: The CLA Network instance
statsCollectors:
Sequence of 0 or more CLAStatistic-based instances
"""
self.net = net
self.statsCollectors = statsCollectors
return
def __repr__(self):
return "NetworkInfo(net=%r, statsCollectors=%r)" % (
self.net, self.statsCollectors)
class CLAModel(Model):
__supportedInferenceKindSet = set((InferenceType.TemporalNextStep,
InferenceType.TemporalClassification,
InferenceType.NontemporalClassification,
InferenceType.NontemporalAnomaly,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep))
__myClassName = "CLAModel"
def __init__(self,
sensorParams,
inferenceType=InferenceType.TemporalNextStep,
predictedField=None,
spEnable=True,
spParams={},
# TODO: We can't figure out what this is. Remove?
trainSPNetOnlyIfRequested=False,
tpEnable=True,
tpParams={},
clEnable=True,
clParams={},
anomalyParams={},
minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,
maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP):
"""CLAModel constructor.
Args:
inferenceType: A value from the InferenceType enum class.
predictedField: The field to predict for multistep prediction.
sensorParams: A dictionary specifying the sensor parameters.
spEnable: Whether or not to use a spatial pooler.
spParams: A dictionary specifying the spatial pooler parameters. These
are passed to the spatial pooler.
trainSPNetOnlyIfRequested: If set, don't create an SP network unless the
user requests SP metrics.
tpEnable: Whether to use a temporal pooler.
tpParams: A dictionary specifying the temporal pooler parameters. These
are passed to the temporal pooler.
clEnable: Whether to use the classifier. If false, the classifier will
not be created and no predictions will be generated.
clParams: A dictionary specifying the classifier parameters. These are
        passed to the classifier.
anomalyParams: Anomaly detection parameters
minLikelihoodThreshold: The minimum likelihood value to include in
inferences. Currently only applies to multistep inferences.
maxPredictionsPerStep: Maximum number of predictions to include for
each step in inferences. The predictions with highest likelihood are
included.
"""
    if inferenceType not in self.__supportedInferenceKindSet:
raise ValueError("{0} received incompatible inference type: {1}"\
.format(self.__class__, inferenceType))
# Call super class constructor
super(CLAModel, self).__init__(inferenceType)
# self.__restoringFromState is set to True by our __setstate__ method
# and back to False at completion of our _deSerializeExtraData() method.
self.__restoringFromState = False
self.__restoringFromV1 = False
    # Initialize logging
self.__logger = initLogger(self)
self.__logger.debug("Instantiating %s." % self.__myClassName)
self._minLikelihoodThreshold = minLikelihoodThreshold
self._maxPredictionsPerStep = maxPredictionsPerStep
# set up learning parameters (note: these may be replaced via
    # enable/disable SP/TP learning methods)
self.__spLearningEnabled = bool(spEnable)
self.__tpLearningEnabled = bool(tpEnable)
# Explicitly exclude the TP if this type of inference doesn't require it
if not InferenceType.isTemporal(self.getInferenceType()) \
or self.getInferenceType() == InferenceType.NontemporalMultiStep:
tpEnable = False
self._netInfo = None
self._hasSP = spEnable
self._hasTP = tpEnable
self._hasCL = clEnable
self._classifierInputEncoder = None
self._predictedFieldIdx = None
self._predictedFieldName = None
self._numFields = None
# init anomaly
windowSize = anomalyParams.get("slidingWindowSize", None)
mode = anomalyParams.get("mode", "pure")
anomalyThreshold = anomalyParams.get("autoDetectThreshold", None)
self._anomalyInst = Anomaly(slidingWindowSize=windowSize, mode=mode,
binaryAnomalyThreshold=anomalyThreshold)
# -----------------------------------------------------------------------
# Create the network
self._netInfo = self.__createCLANetwork(
sensorParams, spEnable, spParams, tpEnable, tpParams, clEnable,
clParams, anomalyParams)
# Initialize Spatial Anomaly detection parameters
if self.getInferenceType() == InferenceType.NontemporalAnomaly:
self._getSPRegion().setParameter("anomalyMode", True)
# Initialize Temporal Anomaly detection parameters
if self.getInferenceType() == InferenceType.TemporalAnomaly:
self._getTPRegion().setParameter("anomalyMode", True)
self._prevPredictedColumns = numpy.array([])
# -----------------------------------------------------------------------
# This flag, if present tells us not to train the SP network unless
# the user specifically asks for the SP inference metric
self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested
self.__numRunCalls = 0
# Tracks whether finishedLearning() has been called
self.__finishedLearning = False
self.__logger.debug("Instantiated %s" % self.__class__.__name__)
self._input = None
return
def getParameter(self, paramName):
if paramName == '__numRunCalls':
return self.__numRunCalls
else:
raise RuntimeError("'%s' parameter is not exposed by clamodel." % \
(paramName))
def resetSequenceStates(self):
""" [virtual method override] Resets the model's sequence states. Normally
called to force the delineation of a sequence, such as between OPF tasks.
"""
if self._hasTP:
# Reset TP's sequence states
self._getTPRegion().executeCommand(['resetSequenceStates'])
self.__logger.debug("CLAModel.resetSequenceStates(): reset temporal "
"pooler's sequence states")
return
def finishLearning(self):
""" [virtual method override] Places the model in a permanent "finished
learning" mode where it will not be able to learn from subsequent input
records.
NOTE: Upon completion of this command, learning may not be resumed on
the given instance of the model (e.g., the implementation may optimize
itself by pruning data structures that are necessary for learning)
"""
assert not self.__finishedLearning
if self._hasSP:
# Finish SP learning
self._getSPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"CLAModel.finishLearning(): finished SP learning")
if self._hasTP:
# Finish temporal network's TP learning
self._getTPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"CLAModel.finishLearning(): finished TP learning")
self.__spLearningEnabled = self.__tpLearningEnabled = False
self.__finishedLearning = True
return
def setFieldStatistics(self,fieldStats):
encoder = self._getEncoder()
# Set the stats for the encoders. The first argument to setFieldStats
# is the field name of the encoder. Since we are using a multiencoder
# we leave it blank, the multiencoder will propagate the field names to the
# underlying encoders
encoder.setFieldStats('',fieldStats)
def enableLearning(self):
"""[override] Turn Learning on for the current model """
super(CLAModel, self).enableLearning()
self.setEncoderLearning(True)
def disableLearning(self):
"""[override] Turn Learning off for the current model """
super(CLAModel, self).disableLearning()
self.setEncoderLearning(False)
def setEncoderLearning(self,learningEnabled):
self._getEncoder().setLearning(learningEnabled)
# Anomaly Accessor Methods
@requireAnomalyModel
def setAnomalyParameter(self, param, value):
"""
Set a parameter of the anomaly classifier within this model.
"""
self._getAnomalyClassifier().setParameter(param, value)
@requireAnomalyModel
def getAnomalyParameter(self, param):
"""
Get a parameter of the anomaly classifier within this model.
"""
return self._getAnomalyClassifier().getParameter(param)
@requireAnomalyModel
def anomalyRemoveLabels(self, start, end, labelFilter):
"""
Remove labels from the anomaly classifier within this model.
"""
self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter)
@requireAnomalyModel
def anomalyAddLabel(self, start, end, labelName):
"""
    Add a label to the anomaly classifier within this model.
"""
self._getAnomalyClassifier().getSelf().addLabel(start, end, labelName)
@requireAnomalyModel
def anomalyGetLabels(self, start, end):
"""
Get labels from the anomaly classifier within this model.
"""
return self._getAnomalyClassifier().getSelf().getLabels(start, end)
def run(self, inputRecord):
""" run one iteration of this model.
args:
inputRecord is a record object formatted according to
nupic.data.RecordStream.getNextRecordDict() result format.
return:
      A ModelResult class (see opfutils.py). The contents of
      ModelResult.inferences depends on the specific inference
type of this model, which can be queried by getInferenceType()
"""
assert not self.__restoringFromState
assert inputRecord
results = super(CLAModel, self).run(inputRecord)
self.__numRunCalls += 1
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug("CLAModel.run() inputRecord=%s", (inputRecord))
results.inferences = {}
self._input = inputRecord
# -------------------------------------------------------------------------
# Turn learning on or off?
if '_learning' in inputRecord:
if inputRecord['_learning']:
self.enableLearning()
else:
self.disableLearning()
###########################################################################
# Predictions and Learning
###########################################################################
self._sensorCompute(inputRecord)
self._spCompute()
self._tpCompute()
results.sensorInput = self._getSensorInputRecord(inputRecord)
inferences = {}
# TODO: Reconstruction and temporal classification not used. Remove
if self._isReconstructionModel():
inferences = self._reconstructionCompute()
elif self._isMultiStepModel():
inferences = self._multiStepCompute(rawInput=inputRecord)
# For temporal classification. Not used, and might not work anymore
elif self._isClassificationModel():
inferences = self._classificationCompute()
results.inferences.update(inferences)
inferences = self._anomalyCompute()
results.inferences.update(inferences)
# -----------------------------------------------------------------------
# Store the index and name of the predictedField
results.predictedFieldIdx = self._predictedFieldIdx
results.predictedFieldName = self._predictedFieldName
results.classifierInput = self._getClassifierInputRecord(inputRecord)
# =========================================================================
# output
assert (not self.isInferenceEnabled() or results.inferences is not None), \
"unexpected inferences: %r" % results.inferences
#self.__logger.setLevel(logging.DEBUG)
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug("inputRecord: %r, results: %r" % (inputRecord,
results))
return results
def _getSensorInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
Return a 'SensorInput' object, which represents the 'parsed'
representation of the input record
"""
sensor = self._getSensorRegion()
dataRow = copy.deepcopy(sensor.getSelf().getOutputValues('sourceOut'))
dataDict = copy.deepcopy(inputRecord)
inputRecordEncodings = sensor.getSelf().getOutputValues('sourceEncodings')
inputRecordCategory = int(sensor.getOutputData('categoryOut')[0])
resetOut = sensor.getOutputData('resetOut')[0]
return SensorInput(dataRow=dataRow,
dataDict=dataDict,
dataEncodings=inputRecordEncodings,
sequenceReset=resetOut,
category=inputRecordCategory)
def _getClassifierInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
Return a 'ClassifierInput' object, which contains the mapped
bucket index for input Record
"""
absoluteValue = None
bucketIdx = None
if self._predictedFieldName is not None and self._classifierInputEncoder is not None:
absoluteValue = inputRecord[self._predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
return ClassifierInput(dataRow=absoluteValue,
bucketIndex=bucketIdx)
def _sensorCompute(self, inputRecord):
sensor = self._getSensorRegion()
self._getDataSource().push(inputRecord)
sensor.setParameter('topDownMode', False)
sensor.prepareInputs()
try:
sensor.compute()
except StopIteration as e:
raise Exception("Unexpected StopIteration", e,
"ACTUAL TRACEBACK: %s" % traceback.format_exc())
def _spCompute(self):
sp = self._getSPRegion()
if sp is None:
return
sp.setParameter('topDownMode', False)
sp.setParameter('inferenceMode', self.isInferenceEnabled())
sp.setParameter('learningMode', self.isLearningEnabled())
sp.prepareInputs()
sp.compute()
def _tpCompute(self):
tp = self._getTPRegion()
if tp is None:
return
if (self.getInferenceType() == InferenceType.TemporalAnomaly or
self._isReconstructionModel()):
topDownCompute = True
else:
topDownCompute = False
tp = self._getTPRegion()
tp.setParameter('topDownMode', topDownCompute)
tp.setParameter('inferenceMode', self.isInferenceEnabled())
tp.setParameter('learningMode', self.isLearningEnabled())
tp.prepareInputs()
tp.compute()
def _isReconstructionModel(self):
inferenceType = self.getInferenceType()
inferenceArgs = self.getInferenceArgs()
if inferenceType == InferenceType.TemporalNextStep:
return True
if inferenceArgs:
return inferenceArgs.get('useReconstruction', False)
return False
def _isMultiStepModel(self):
return self.getInferenceType() in (InferenceType.NontemporalMultiStep,
InferenceType.NontemporalClassification,
InferenceType.TemporalMultiStep,
InferenceType.TemporalAnomaly)
def _isClassificationModel(self):
    return self.getInferenceType() in (InferenceType.TemporalClassification,)
def _multiStepCompute(self, rawInput):
patternNZ = None
if self._getTPRegion() is not None:
tp = self._getTPRegion()
tpOutput = tp.getSelf()._tfdr.infActiveState['t']
patternNZ = tpOutput.reshape(-1).nonzero()[0]
elif self._getSPRegion() is not None:
sp = self._getSPRegion()
spOutput = sp.getOutputData('bottomUpOut')
patternNZ = spOutput.nonzero()[0]
elif self._getSensorRegion() is not None:
sensor = self._getSensorRegion()
sensorOutput = sensor.getOutputData('dataOut')
patternNZ = sensorOutput.nonzero()[0]
else:
raise RuntimeError("Attempted to make multistep prediction without"
"TP, SP, or Sensor regions")
inputTSRecordIdx = rawInput.get('_timestampRecordIdx')
return self._handleCLAClassifierMultiStep(
patternNZ=patternNZ,
inputTSRecordIdx=inputTSRecordIdx,
rawInput=rawInput)
def _classificationCompute(self):
inference = {}
classifier = self._getClassifierRegion()
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', self.isLearningEnabled())
classifier.prepareInputs()
classifier.compute()
# What we get out is the score for each category. The argmax is
# then the index of the winning category
classificationDist = classifier.getOutputData('categoriesOut')
classification = classificationDist.argmax()
probabilities = classifier.getOutputData('categoryProbabilitiesOut')
numCategories = classifier.getParameter('activeOutputCount')
classConfidences = dict(zip(xrange(numCategories), probabilities))
inference[InferenceElement.classification] = classification
inference[InferenceElement.classConfidences] = {0: classConfidences}
return inference
def _reconstructionCompute(self):
if not self.isInferenceEnabled():
return {}
sp = self._getSPRegion()
sensor = self._getSensorRegion()
#--------------------------------------------------
# SP Top-down flow
sp.setParameter('topDownMode', True)
sp.prepareInputs()
sp.compute()
#--------------------------------------------------
# Sensor Top-down flow
sensor.setParameter('topDownMode', True)
sensor.prepareInputs()
sensor.compute()
# Need to call getOutputValues() instead of going through getOutputData()
# because the return values may contain strings, which cannot be passed
# through the Region.cpp code.
# predictionRow is a list of values, one for each field. The value is
# in the same type as the original input to the encoder and may be a
# string for category fields for example.
predictionRow = copy.copy(sensor.getSelf().getOutputValues('temporalTopDownOut'))
predictionFieldEncodings = sensor.getSelf().getOutputValues('temporalTopDownEncodings')
inferences = {}
inferences[InferenceElement.prediction] = tuple(predictionRow)
inferences[InferenceElement.encodings] = tuple(predictionFieldEncodings)
return inferences
def _anomalyCompute(self):
"""
Compute Anomaly score, if required
"""
inferenceType = self.getInferenceType()
inferences = {}
sp = self._getSPRegion()
score = None
if inferenceType == InferenceType.NontemporalAnomaly:
score = sp.getOutputData("anomalyScore")[0] #TODO move from SP to Anomaly ?
elif inferenceType == InferenceType.TemporalAnomaly:
tp = self._getTPRegion()
if sp is not None:
activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0]
else:
sensor = self._getSensorRegion()
activeColumns = sensor.getOutputData('dataOut').nonzero()[0]
      if self._predictedFieldName not in self._input:
raise ValueError(
"Expected predicted field '%s' in input row, but was not found!"
% self._predictedFieldName
)
# Calculate the anomaly score using the active columns
# and previous predicted columns.
score = self._anomalyInst.compute(
activeColumns,
self._prevPredictedColumns,
inputValue=self._input[self._predictedFieldName])
# Store the predicted columns for the next timestep.
predictedColumns = tp.getOutputData("topDownOut").nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
# Calculate the classifier's output and use the result as the anomaly
# label. Stores as string of results.
# TODO: make labels work with non-SP models
if sp is not None:
self._getAnomalyClassifier().setParameter(
"activeColumnCount", len(activeColumns))
self._getAnomalyClassifier().prepareInputs()
self._getAnomalyClassifier().compute()
labels = self._getAnomalyClassifier().getSelf().getLabelResults()
inferences[InferenceElement.anomalyLabel] = "%s" % labels
inferences[InferenceElement.anomalyScore] = score
return inferences
def _handleCLAClassifierMultiStep(self, patternNZ,
inputTSRecordIdx,
rawInput):
""" Handle the CLA Classifier compute logic when implementing multi-step
prediction. This is where the patternNZ is associated with one of the
other fields from the dataset 0 to N steps in the future. This method is
used by each type of network (encoder only, SP only, SP +TP) to handle the
compute logic through the CLA Classifier. It fills in the inference dict with
the results of the compute.
Parameters:
-------------------------------------------------------------------
    patternNZ: The input to the CLA Classifier as a list of active input indices
inputTSRecordIdx: The index of the record as computed from the timestamp
and aggregation interval. This normally increments by 1
each time unless there are missing records. If there is no
aggregation interval or timestamp in the data, this will be
None.
rawInput: The raw input to the sensor, as a dict.
"""
inferenceArgs = self.getInferenceArgs()
predictedFieldName = inferenceArgs.get('predictedField', None)
if predictedFieldName is None:
raise ValueError(
"No predicted field was enabled! Did you call enableInference()?"
)
self._predictedFieldName = predictedFieldName
classifier = self._getClassifierRegion()
if not self._hasCL or classifier is None:
# No classifier so return an empty dict for inferences.
return {}
sensor = self._getSensorRegion()
minLikelihoodThreshold = self._minLikelihoodThreshold
maxPredictionsPerStep = self._maxPredictionsPerStep
needLearning = self.isLearningEnabled()
inferences = {}
# Get the classifier input encoder, if we don't have it already
if self._classifierInputEncoder is None:
if predictedFieldName is None:
raise RuntimeError("This experiment description is missing "
"the 'predictedField' in its config, which is required "
"for multi-step prediction inference.")
encoderList = sensor.getSelf().encoder.getEncoderList()
self._numFields = len(encoderList)
      # Get the index of the predicted field, if it is being fed to the CLA.
fieldNames = sensor.getSelf().encoder.getScalarNames()
if predictedFieldName in fieldNames:
self._predictedFieldIdx = fieldNames.index(predictedFieldName)
else:
# Predicted field was not fed into the network, only to the classifier
self._predictedFieldIdx = None
# In a multi-step model, the classifier input encoder is separate from
# the other encoders and always disabled from going into the bottom of
# the network.
if sensor.getSelf().disabledEncoder is not None:
encoderList = sensor.getSelf().disabledEncoder.getEncoderList()
else:
encoderList = []
if len(encoderList) >= 1:
fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()
self._classifierInputEncoder = encoderList[fieldNames.index(
predictedFieldName)]
else:
# Legacy multi-step networks don't have a separate encoder for the
# classifier, so use the one that goes into the bottom of the network
encoderList = sensor.getSelf().encoder.getEncoderList()
self._classifierInputEncoder = encoderList[self._predictedFieldIdx]
# Get the actual value and the bucket index for this sample. The
# predicted field may not be enabled for input to the network, so we
# explicitly encode it outside of the sensor
# TODO: All this logic could be simpler if in the encoder itself
    if predictedFieldName not in rawInput:
raise ValueError("Input row does not contain a value for the predicted "
"field configured for this model. Missing value for '%s'"
% predictedFieldName)
absoluteValue = rawInput[predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
# Convert the absolute values to deltas if necessary
# The bucket index should be handled correctly by the underlying delta encoder
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Make the delta before any values have been seen 0 so that we do not mess up the
# range for the adaptive scalar encoder.
if not hasattr(self,"_ms_prevVal"):
self._ms_prevVal = absoluteValue
prevValue = self._ms_prevVal
self._ms_prevVal = absoluteValue
actualValue = absoluteValue - prevValue
else:
actualValue = absoluteValue
if isinstance(actualValue, float) and math.isnan(actualValue):
actualValue = SENTINEL_VALUE_FOR_MISSING_DATA
# Pass this information to the classifier's custom compute method
# so that it can assign the current classification to possibly
# multiple patterns from the past and current, and also provide
# the expected classification for some time step(s) in the future.
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', needLearning)
classificationIn = {'bucketIdx': bucketIdx,
'actValue': actualValue}
# Handle missing records
if inputTSRecordIdx is not None:
recordNum = inputTSRecordIdx
else:
recordNum = self.__numRunCalls
clResults = classifier.getSelf().customCompute(recordNum=recordNum,
patternNZ=patternNZ,
classification=classificationIn)
# ---------------------------------------------------------------
# Get the prediction for every step ahead learned by the classifier
predictionSteps = classifier.getParameter('steps')
predictionSteps = [int(x) for x in predictionSteps.split(',')]
# We will return the results in this dict. The top level keys
# are the step number, the values are the relative likelihoods for
# each classification value in that time step, represented as
# another dict where the keys are the classification values and
# the values are the relative likelihoods.
inferences[InferenceElement.multiStepPredictions] = dict()
inferences[InferenceElement.multiStepBestPredictions] = dict()
inferences[InferenceElement.multiStepBucketLikelihoods] = dict()
# ======================================================================
# Plug in the predictions for each requested time step.
for steps in predictionSteps:
# From the clResults, compute the predicted actual value. The
# CLAClassifier classifies the bucket index and returns a list of
# relative likelihoods for each bucket. Let's find the max one
# and then look up the actual value from that bucket index
likelihoodsVec = clResults[steps]
bucketValues = clResults['actualValues']
# Create a dict of value:likelihood pairs. We can't simply use
# dict(zip(bucketValues, likelihoodsVec)) because there might be
# duplicate bucketValues (this happens early on in the model when
# it doesn't have actual values for each bucket so it returns
# multiple buckets with the same default actual value).
likelihoodsDict = dict()
bestActValue = None
bestProb = None
for (actValue, prob) in zip(bucketValues, likelihoodsVec):
if actValue in likelihoodsDict:
likelihoodsDict[actValue] += prob
else:
likelihoodsDict[actValue] = prob
# Keep track of best
if bestProb is None or likelihoodsDict[actValue] > bestProb:
bestProb = likelihoodsDict[actValue]
bestActValue = actValue
# Remove entries with 0 likelihood or likelihood less than
# minLikelihoodThreshold, but don't leave an empty dict.
likelihoodsDict = CLAModel._removeUnlikelyPredictions(
likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)
# calculate likelihood for each bucket
bucketLikelihood = {}
for k in likelihoodsDict.keys():
bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
likelihoodsDict[k])
# ---------------------------------------------------------------------
# If we have a delta encoder, we have to shift our predicted output value
# by the sum of the deltas
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Get the prediction history for this number of timesteps.
# The prediction history is a store of the previous best predicted values.
# This is used to get the final shift from the current absolute value.
if not hasattr(self, '_ms_predHistories'):
self._ms_predHistories = dict()
predHistories = self._ms_predHistories
if steps not in predHistories:
predHistories[steps] = deque()
predHistory = predHistories[steps]
# Find the sum of the deltas for the steps and use this to generate
# an offset from the current absolute value
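# For example (illustrative, added comment): with absoluteValue = 10.0, a
# prediction history of previously predicted deltas [2.0, 1.0] (so
# sumDelta = 3.0) and a predicted delta of 3.0 for this step, the
# reconstructed absolute prediction is 10.0 + 3.0 + 3.0 = 16.0.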
sumDelta = sum(predHistory)
offsetDict = dict()
for (k, v) in likelihoodsDict.iteritems():
if k is not None:
# Reconstruct the absolute value based on the current actual value,
# the best predicted values from the previous iterations,
# and the current predicted delta
offsetDict[absoluteValue+float(k)+sumDelta] = v
# calculate likelihood for each bucket
bucketLikelihoodOffset = {}
for k in offsetDict.keys():
bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
offsetDict[k])
# Push the current best delta to the history buffer for reconstructing the final delta
if bestActValue is not None:
predHistory.append(bestActValue)
# If we don't need any more values in the predictionHistory, pop off
# the earliest one.
if len(predHistory) >= steps:
predHistory.popleft()
# Provide the offsetDict as the return value
if len(offsetDict)>0:
inferences[InferenceElement.multiStepPredictions][steps] = offsetDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffset
else:
inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood
if bestActValue is None:
inferences[InferenceElement.multiStepBestPredictions][steps] = None
else:
inferences[InferenceElement.multiStepBestPredictions][steps] = (
absoluteValue + sumDelta + bestActValue)
# ---------------------------------------------------------------------
# Normal case, no delta encoder. Just plug in all our multi-step predictions
# with likelihoods as well as our best prediction
else:
# The multiStepPredictions element holds the probabilities for each
# bucket
inferences[InferenceElement.multiStepPredictions][steps] = (
likelihoodsDict)
inferences[InferenceElement.multiStepBestPredictions][steps] = (
bestActValue)
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = (
bucketLikelihood)
return inferences
@classmethod
def _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold,
maxPredictionsPerStep):
"""Remove entries with 0 likelihood or likelihood less than
minLikelihoodThreshold, but don't leave an empty dict.
"""
maxVal = (None, None)
for (k, v) in likelihoodsDict.items():
if len(likelihoodsDict) <= 1:
break
if maxVal[0] is None or v >= maxVal[1]:
if maxVal[0] is not None and maxVal[1] < minLikelihoodThreshold:
del likelihoodsDict[maxVal[0]]
maxVal = (k, v)
elif v < minLikelihoodThreshold:
del likelihoodsDict[k]
# Limit the number of predictions to include.
likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(),
key=itemgetter(1),
reverse=True)[:maxPredictionsPerStep])
return likelihoodsDict
def getRuntimeStats(self):
""" [virtual method override] get runtime statistics specific to this
model, e.g., activeCellOverlapAvg
return:
a dict where keys are statistic names and values are the stats
"""
ret = {"numRunCalls" : self.__numRunCalls}
#--------------------------------------------------
# Query temporal network stats
temporalStats = dict()
if self._hasTP:
for stat in self._netInfo.statsCollectors:
sdict = stat.getStats()
temporalStats.update(sdict)
ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats
return ret
def getFieldInfo(self, includeClassifierOnlyField=False):
""" [virtual method override]
Returns the sequence of FieldMetaInfo objects specifying this
Model's output; note that this may be different than the list of
FieldMetaInfo objects supplied at initialization (e.g., due to the
transcoding of some input fields into meta-fields, such as datetime
-> dayOfWeek, timeOfDay, etc.)
Returns: List of FieldMetaInfo objects (see description above)
"""
encoder = self._getEncoder()
fieldNames = encoder.getScalarNames()
fieldTypes = encoder.getDecoderOutputFieldTypes()
assert len(fieldNames) == len(fieldTypes)
# Also include the classifierOnly field?
encoder = self._getClassifierOnlyEncoder()
if includeClassifierOnlyField and encoder is not None:
addFieldNames = encoder.getScalarNames()
addFieldTypes = encoder.getDecoderOutputFieldTypes()
assert len(addFieldNames) == len(addFieldTypes)
fieldNames = list(fieldNames) + addFieldNames
fieldTypes = list(fieldTypes) + addFieldTypes
fieldMetaList = map(FieldMetaInfo._make,
zip(fieldNames,
fieldTypes,
itertools.repeat(FieldMetaSpecial.none)))
return tuple(fieldMetaList)
def _getLogger(self):
""" Get the logger for this object. This is a protected method that is used
by the Model to access the logger created by the subclass
return:
A logging.Logger object. Should not be None
"""
return self.__logger
def _getSPRegion(self):
"""
Returns reference to the network's SP region
"""
return self._netInfo.net.regions.get('SP', None)
def _getTPRegion(self):
"""
Returns reference to the network's TP region
"""
return self._netInfo.net.regions.get('TP', None)
def _getSensorRegion(self):
"""
Returns reference to the network's Sensor region
"""
return self._netInfo.net.regions['sensor']
def _getClassifierRegion(self):
"""
Returns reference to the network's Classifier region
"""
if (self._netInfo.net is not None and
"Classifier" in self._netInfo.net.regions):
return self._netInfo.net.regions["Classifier"]
else:
return None
def _getAnomalyClassifier(self):
return self._netInfo.net.regions.get("AnomalyClassifier", None)
def _getEncoder(self):
"""
Returns: sensor region's encoder for the given network
"""
return self._getSensorRegion().getSelf().encoder
def _getClassifierOnlyEncoder(self):
"""
Returns: sensor region's encoder that is sent only to the classifier,
not to the bottom of the network
"""
return self._getSensorRegion().getSelf().disabledEncoder
def _getDataSource(self):
"""
Returns: data source that we installed in sensor region
"""
return self._getSensorRegion().getSelf().dataSource
def __createCLANetwork(self, sensorParams, spEnable, spParams, tpEnable,
tpParams, clEnable, clParams, anomalyParams):
""" Create a CLA network and return it.
Returns: NetworkInfo instance
"""
#--------------------------------------------------
# Create the network
n = Network()
#--------------------------------------------------
# Add the Sensor
n.addRegion("sensor", "py.RecordSensor", json.dumps(dict(verbosity=sensorParams['verbosity'])))
sensor = n.regions['sensor'].getSelf()
enabledEncoders = copy.deepcopy(sensorParams['encoders'])
for name, params in enabledEncoders.items():
if params is not None:
classifierOnly = params.pop('classifierOnly', False)
if classifierOnly:
enabledEncoders.pop(name)
# Disabled encoders are encoders that are fed to CLAClassifierRegion but not
# SP or TP Regions. This is to handle the case where the predicted field
# is not fed through the SP/TP. We typically just have one of these now.
disabledEncoders = copy.deepcopy(sensorParams['encoders'])
for name, params in disabledEncoders.items():
if params is None:
disabledEncoders.pop(name)
else:
classifierOnly = params.pop('classifierOnly', False)
if not classifierOnly:
disabledEncoders.pop(name)
encoder = MultiEncoder(enabledEncoders)
sensor.encoder = encoder
sensor.disabledEncoder = MultiEncoder(disabledEncoders)
sensor.dataSource = DataBuffer()
prevRegion = "sensor"
prevRegionWidth = encoder.getWidth()
# SP is not enabled for spatial classification network
if spEnable:
spParams = spParams.copy()
spParams['inputWidth'] = prevRegionWidth
self.__logger.debug("Adding SPRegion; spParams: %r" % spParams)
n.addRegion("SP", "py.SPRegion", json.dumps(spParams))
# Link SP region
n.link("sensor", "SP", "UniformLink", "")
n.link("sensor", "SP", "UniformLink", "", srcOutput="resetOut",
destInput="resetIn")
n.link("SP", "sensor", "UniformLink", "", srcOutput="spatialTopDownOut",
destInput="spatialTopDownIn")
n.link("SP", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut",
destInput="temporalTopDownIn")
prevRegion = "SP"
prevRegionWidth = spParams['columnCount']
if tpEnable:
tpParams = tpParams.copy()
if prevRegion == 'sensor':
tpParams['inputWidth'] = tpParams['columnCount'] = prevRegionWidth
else:
assert tpParams['columnCount'] == prevRegionWidth
tpParams['inputWidth'] = tpParams['columnCount']
self.__logger.debug("Adding TPRegion; tpParams: %r" % tpParams)
n.addRegion("TP", "py.TPRegion", json.dumps(tpParams))
# Link TP region
n.link(prevRegion, "TP", "UniformLink", "")
if prevRegion != "sensor":
n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut",
destInput="topDownIn")
else:
n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut",
destInput="temporalTopDownIn")
n.link("sensor", "TP", "UniformLink", "", srcOutput="resetOut",
destInput="resetIn")
prevRegion = "TP"
prevRegionWidth = tpParams['inputWidth']
if clEnable and clParams is not None:
clParams = clParams.copy()
clRegionName = clParams.pop('regionName')
self.__logger.debug("Adding %s; clParams: %r" % (clRegionName,
clParams))
n.addRegion("Classifier", "py.%s" % str(clRegionName), json.dumps(clParams))
n.link("sensor", "Classifier", "UniformLink", "", srcOutput="categoryOut",
destInput="categoryIn")
n.link(prevRegion, "Classifier", "UniformLink", "")
if self.getInferenceType() == InferenceType.TemporalAnomaly:
anomalyClParams = dict(
trainRecords=anomalyParams.get('autoDetectWaitRecords', None),
cacheSize=anomalyParams.get('anomalyCacheRecords', None)
)
self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tpEnable)
#--------------------------------------------------
# NuPIC doesn't initialize the network until you try to run it
# but users may want to access components in a setup callback
n.initialize()
return NetworkInfo(net=n, statsCollectors=[])
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with data that shouldn't be pickled stripped out. In particular,
the CLA Network is stripped out because it has its own serialization
mechanism.
See also: _serializeExtraData()
"""
# Remove ephemeral member variables from state
state = self.__dict__.copy()
state["_netInfo"] = NetworkInfo(net=None,
statsCollectors=self._netInfo.statsCollectors)
for ephemeral in [self.__manglePrivateMemberName("__restoringFromState"),
self.__manglePrivateMemberName("__logger")]:
state.pop(ephemeral)
return state
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
See also: _deSerializeExtraData
"""
self.__dict__.update(state)
# Mark beginning of restoration.
#
# self.__restoringFromState will be reset to False upon completion of
# object restoration in _deSerializeExtraData()
self.__restoringFromState = True
# set up logging
self.__logger = initLogger(self)
# =========================================================================
# TODO: Temporary migration solution
if not hasattr(self, "_Model__inferenceType"):
self.__restoringFromV1 = True
self._hasSP = True
if self.__temporalNetInfo is not None:
self._Model__inferenceType = InferenceType.TemporalNextStep
self._netInfo = self.__temporalNetInfo
self._hasTP = True
else:
raise RuntimeError("The Nontemporal inference type is not supported")
self._Model__inferenceArgs = {}
self._Model__learningEnabled = True
self._Model__inferenceEnabled = True
# Remove obsolete members
self.__dict__.pop("_CLAModel__encoderNetInfo", None)
self.__dict__.pop("_CLAModel__nonTemporalNetInfo", None)
self.__dict__.pop("_CLAModel__temporalNetInfo", None)
# -----------------------------------------------------------------------
# Migrate from v2
if not hasattr(self, "_netInfo"):
self._hasSP = False
self._hasTP = False
if self.__encoderNetInfo is not None:
self._netInfo = self.__encoderNetInfo
elif self.__nonTemporalNetInfo is not None:
self._netInfo = self.__nonTemporalNetInfo
self._hasSP = True
else:
self._netInfo = self.__temporalNetInfo
self._hasSP = True
self._hasTP = True
# Remove obsolete members
self.__dict__.pop("_CLAModel__encoderNetInfo", None)
self.__dict__.pop("_CLAModel__nonTemporalNetInfo", None)
self.__dict__.pop("_CLAModel__temporalNetInfo", None)
# -----------------------------------------------------------------------
# Migrate from when Anomaly was not separate class
if not hasattr(self, "_anomalyInst"):
self._anomalyInst = Anomaly()
# This gets filled in during the first infer because it can only be
# determined at run-time
self._classifierInputEncoder = None
if not hasattr(self, '_minLikelihoodThreshold'):
self._minLikelihoodThreshold = DEFAULT_LIKELIHOOD_THRESHOLD
if not hasattr(self, '_maxPredictionsPerStep'):
self._maxPredictionsPerStep = DEFAULT_MAX_PREDICTIONS_PER_STEP
if not hasattr(self, '_hasCL'):
self._hasCL = (self._getClassifierRegion() is not None)
self.__logger.debug("Restoring %s from state..." % self.__class__.__name__)
def _serializeExtraData(self, extraDataDir):
""" [virtual method override] This method is called during serialization
with an external directory path that can be used to bypass pickle for saving
large binary states.
extraDataDir:
Model's extra data directory path
"""
makeDirectoryFromAbsolutePath(extraDataDir)
#--------------------------------------------------
# Save the network
outputDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
self.__logger.debug("Serializing network...")
self._netInfo.net.save(outputDir)
self.__logger.debug("Finished serializing network")
return
def _deSerializeExtraData(self, extraDataDir):
""" [virtual method override] This method is called during deserialization
(after __setstate__) with an external directory path that can be used to
bypass pickle for loading large binary states.
extraDataDir:
Model's extra data directory path
"""
assert self.__restoringFromState
#--------------------------------------------------
# Check to make sure that our Network member wasn't restored from
# serialized data
assert (self._netInfo.net is None), "Network was already unpickled"
#--------------------------------------------------
# Restore the network
stateDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
self.__logger.debug(
"(%s) De-serializing network...", self)
self._netInfo.net = Network(stateDir)
self.__logger.debug(
"(%s) Finished de-serializing network", self)
# NuPIC doesn't initialize the network until you try to run it
# but users may want to access components in a setup callback
self._netInfo.net.initialize()
# Used for backwards compatibility for anomaly classification models.
# Previous versions used the CLAModelClassifierHelper class for utilizing
# the KNN classifier. Current version uses KNNAnomalyClassifierRegion to
# encapsulate all the classifier functionality.
if self.getInferenceType() == InferenceType.TemporalAnomaly:
classifierType = self._getAnomalyClassifier().getSelf().__class__.__name__
if classifierType == 'KNNClassifierRegion':
anomalyClParams = dict(
trainRecords=self._classifier_helper._autoDetectWaitRecords,
cacheSize=self._classifier_helper._history_length,
)
spEnable = (self._getSPRegion() is not None)
tpEnable = True
# Store original KNN region
knnRegion = self._getAnomalyClassifier().getSelf()
# Add new KNNAnomalyClassifierRegion
self._addAnomalyClassifierRegion(self._netInfo.net, anomalyClParams,
spEnable, tpEnable)
# Restore state
self._getAnomalyClassifier().getSelf()._iteration = self.__numRunCalls
self._getAnomalyClassifier().getSelf()._recordsCache = (
self._classifier_helper.saved_states)
self._getAnomalyClassifier().getSelf().saved_categories = (
self._classifier_helper.saved_categories)
self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegion
# Set TP to output necessary information
self._getTPRegion().setParameter('anomalyMode', True)
# Remove old classifier_helper
del self._classifier_helper
self._netInfo.net.initialize()
#--------------------------------------------------
# Mark end of restoration from state
self.__restoringFromState = False
self.__logger.debug("(%s) Finished restoring from state", self)
return
def _addAnomalyClassifierRegion(self, network, params, spEnable, tpEnable):
"""
Attaches an 'AnomalyClassifier' region to the network. Will remove current
'AnomalyClassifier' region if it exists.
Parameters
-----------
network - network to add the AnomalyClassifier region
params - parameters to pass to the region
spEnable - True if network has an SP region
tpEnable - True if network has a TP region; Currently requires True
"""
allParams = copy.deepcopy(params)
knnParams = dict(k=1,
distanceMethod='rawOverlap',
distanceNorm=1,
doBinarization=1,
replaceDuplicates=0,
maxStoredPatterns=1000)
allParams.update(knnParams)
# Set defaults if not set
if allParams['trainRecords'] is None:
allParams['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS
if allParams['cacheSize'] is None:
allParams['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE
# Remove current instance if already created (used for deserializing)
if self._netInfo is not None and self._netInfo.net is not None \
and self._getAnomalyClassifier() is not None:
self._netInfo.net.removeRegion('AnomalyClassifier')
network.addRegion("AnomalyClassifier",
"py.KNNAnomalyClassifierRegion",
json.dumps(allParams))
# Attach link to SP
if spEnable:
network.link("SP", "AnomalyClassifier", "UniformLink", "",
srcOutput="bottomUpOut", destInput="spBottomUpOut")
else:
network.link("sensor", "AnomalyClassifier", "UniformLink", "",
srcOutput="dataOut", destInput="spBottomUpOut")
# Attach link to TP
if tpEnable:
network.link("TP", "AnomalyClassifier", "UniformLink", "",
srcOutput="topDownOut", destInput="tpTopDownOut")
network.link("TP", "AnomalyClassifier", "UniformLink", "",
srcOutput="lrnActiveStateT", destInput="tpLrnActiveStateT")
else:
raise RuntimeError("TemporalAnomaly models require a TP region.")
def __getNetworkStateDirectory(self, extraDataDir):
"""
extraDataDir:
Model's extra data directory path
Returns: Absolute directory path for saving CLA Network
"""
if self.__restoringFromV1:
if self.getInferenceType() == InferenceType.TemporalNextStep:
leafName = 'temporal'+ "-network.nta"
else:
leafName = 'nonTemporal'+ "-network.nta"
else:
leafName = InferenceType.getLabel(self.getInferenceType()) + "-network.nta"
path = os.path.join(extraDataDir, leafName)
path = os.path.abspath(path)
return path
def __manglePrivateMemberName(self, privateMemberName, skipCheck=False):
""" Mangles the given mangled (private) member name; a mangled member name
is one whose name begins with two or more underscores and ends with one
or zero underscores.
privateMemberName:
The private member name (e.g., "__logger")
skipCheck: Pass True to skip the check for presence of the mangled member
in our instance.
Returns: The mangled member name (e.g., "_CLAModel__logger")
"""
assert privateMemberName.startswith("__"), \
"%r doesn't start with __" % privateMemberName
assert not privateMemberName.startswith("___"), \
"%r starts with ___" % privateMemberName
assert not privateMemberName.endswith("__"), \
"%r ends with more than one underscore" % privateMemberName
realName = "_" + (self.__myClassName).lstrip("_") + privateMemberName
if not skipCheck:
# This will throw an exception if the member is missing
getattr(self, realName)
return realName
class DataBuffer(object):
"""
A simple FIFO buffer. Add data when it's available, and
implement getNextRecordDict() so DataBuffer can be used as a DataSource
in a CLA Network.
Currently, DataBuffer requires the stack to contain 0 or 1 records.
This requirement may change in the future, and is trivially supported
by removing the assertions.
"""
def __init__(self):
self.stack = []
def push(self, data):
assert len(self.stack) == 0
# Copy the data, because sensor's pre-encoding filters (e.g.,
# AutoResetFilter) may modify it. Our caller relies on the input record
# remaining unmodified.
data = data.__class__(data)
self.stack.append(data)
def getNextRecordDict(self):
assert len(self.stack) > 0
return self.stack.pop()
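# --------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# NuPIC source). It relies only on the DataBuffer class above and shows the
# one-record-per-compute contract that the sensor region expects.
if __name__ == "__main__":
    _buf = DataBuffer()
    _buf.push({"consumption": 21.2, "timestamp": "2010-07-02 00:00:00"})
    # The sensor pulls exactly one record per network compute cycle.
    _record = _buf.getNextRecordDict()
    assert len(_buf.stack) == 0
    print _record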
|
glensc/node-gyp
|
refs/heads/master
|
gyp/test/mac/gyptest-xcode-env-order.py
|
86
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that dependent Xcode settings are processed correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'xcode-env-order'
INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
# Env vars in 'copies' filenames.
test.built_file_must_exist('Test-copy-brace/main.c', chdir=CHDIR)
test.built_file_must_exist('Test-copy-paren/main.c', chdir=CHDIR)
test.built_file_must_exist('Test-copy-bare/main.c', chdir=CHDIR)
# Env vars in 'actions' filenames and inline actions
test.built_file_must_exist('action-copy-brace.txt', chdir=CHDIR)
test.built_file_must_exist('action-copy-paren.txt', chdir=CHDIR)
test.built_file_must_exist('action-copy-bare.txt', chdir=CHDIR)
# Env vars in 'rules' filenames and inline actions
test.built_file_must_exist('rule-copy-brace.txt', chdir=CHDIR)
test.built_file_must_exist('rule-copy-paren.txt', chdir=CHDIR)
# TODO: see comment in test.gyp for this file.
#test.built_file_must_exist('rule-copy-bare.txt', chdir=CHDIR)
# Env vars in Info.plist.
info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, '''\
\t<key>BraceProcessedKey1</key>
\t<string>D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>BraceProcessedKey2</key>
\t<string>/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>BraceProcessedKey3</key>
\t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>ParenProcessedKey1</key>
\t<string>D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>ParenProcessedKey2</key>
\t<string>/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>ParenProcessedKey3</key>
\t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>BareProcessedKey1</key>
\t<string>D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>BareProcessedKey2</key>
\t<string>/Source/Project/Test</string>''')
# NOTE: For bare variables, $PRODUCT_TYPE is not replaced! It _is_ replaced
# if it's not right at the start of the string (e.g. ':$PRODUCT_TYPE'), so
# this looks like an Xcode bug. This bug isn't emulated (yet?), so check this
# only for Xcode.
if test.format == 'xcode':
test.must_contain(info_plist, '''\
\t<key>BareProcessedKey3</key>
\t<string>$PRODUCT_TYPE:D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>MixedProcessedKey</key>
\t<string>/Source/Project:Test:mh_execute</string>''')
test.pass_test()
|
mahendra-r/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/profile_images/views.py
|
50
|
"""
This module implements the upload and remove endpoints of the profile image api.
"""
from contextlib import closing
import datetime
import logging
from django.utils.translation import ugettext as _
from django.utils.timezone import utc
from rest_framework import permissions, status
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.response import Response
from rest_framework.views import APIView
from openedx.core.djangoapps.user_api.errors import UserNotFound
from openedx.core.lib.api.authentication import (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser,
)
from openedx.core.lib.api.permissions import IsUserInUrl, IsUserInUrlOrStaff
from openedx.core.djangoapps.user_api.accounts.image_helpers import get_profile_image_names, set_has_profile_image
from .images import validate_uploaded_image, create_profile_images, remove_profile_images, ImageValidationError
log = logging.getLogger(__name__)
LOG_MESSAGE_CREATE = 'Generated and uploaded images %(image_names)s for user %(user_id)s'
LOG_MESSAGE_DELETE = 'Deleted images %(image_names)s for user %(user_id)s'
def _make_upload_dt():
"""
Generate a server-side timestamp for the upload. This is in a separate
function so its behavior can be overridden in tests.
"""
return datetime.datetime.utcnow().replace(tzinfo=utc)
class ProfileImageUploadView(APIView):
"""
**Use Case**
* Upload an image for the user's profile.
The requesting user must be signed in. The signed in user can only
upload his or her own profile image.
**Example Request**
POST /api/profile_images/v1/{username}/upload
**Example Responses**
When the requesting user tries to upload the image for a different user, the
request returns one of the following responses.
* If the requesting user has staff access, the request returns an HTTP 403
"Forbidden" response.
* If the requesting user does not have staff access, the request returns
an HTTP 404 "Not Found" response.
* If no user matches the "username" parameter, the request returns an HTTP
404 "Not Found" response.
* If the upload could not be performed, the request returns an HTTP 400 "Bad
Request" response with more information.
* If the upload is successful, the request returns an HTTP 204 "No Content"
response with no additional content.
"""
parser_classes = (MultiPartParser, FormParser,)
authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser)
permission_classes = (permissions.IsAuthenticated, IsUserInUrl)
def post(self, request, username):
"""
POST /api/profile_images/v1/{username}/upload
"""
# validate the request: make sure a file was actually sent
if 'file' not in request.FILES:
return Response(
{
"developer_message": u"No file provided for profile image",
"user_message": _(u"No file provided for profile image"),
},
status=status.HTTP_400_BAD_REQUEST
)
# process the upload.
uploaded_file = request.FILES['file']
# no matter what happens, delete the temporary file when we're done
with closing(uploaded_file):
# image file validation.
try:
validate_uploaded_image(uploaded_file)
except ImageValidationError as error:
return Response(
{"developer_message": error.message, "user_message": error.user_message},
status=status.HTTP_400_BAD_REQUEST,
)
# generate profile pic and thumbnails and store them
profile_image_names = get_profile_image_names(username)
create_profile_images(uploaded_file, profile_image_names)
# update the user account to reflect that a profile image is available.
set_has_profile_image(username, True, _make_upload_dt())
log.info(
LOG_MESSAGE_CREATE,
{'image_names': profile_image_names.values(), 'user_id': request.user.id}
)
# send client response.
return Response(status=status.HTTP_204_NO_CONTENT)
class ProfileImageRemoveView(APIView):
"""
**Use Case**
* Remove all of the profile images associated with the user's account.
The requesting user must be signed in.
Users with staff access can remove profile images for other user
accounts.
Users without staff access can only remove their own profile images.
**Example Request**
POST /api/profile_images/v1/{username}/remove
**Example Responses**
When the requesting user tries to remove the profile image for a
different user, the request returns one of the following responses.
* If the user does not have staff access, the request returns an HTTP
404 "Not Found" response.
* If no user matches the "username" parameter, the request returns an
HTTP 404 "Not Found" response.
* If the image could not be removed, the request returns an HTTP 400
"Bad Request" response with more information.
* If the request successfully removes the image, the request returns
an HTTP 204 "No Content" response with no additional content.
"""
authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser)
permission_classes = (permissions.IsAuthenticated, IsUserInUrlOrStaff)
def post(self, request, username): # pylint: disable=unused-argument
"""
POST /api/profile_images/v1/{username}/remove
"""
try:
# update the user account to reflect that the images were removed.
set_has_profile_image(username, False)
# remove physical files from storage.
profile_image_names = get_profile_image_names(username)
remove_profile_images(profile_image_names)
log.info(
LOG_MESSAGE_DELETE,
{'image_names': profile_image_names.values(), 'user_id': request.user.id}
)
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
# send client response.
return Response(status=status.HTTP_204_NO_CONTENT)
|
ProfessionalIT/professionalit-webiste
|
refs/heads/master
|
sdk/google_appengine/lib/django-1.5/django/contrib/localflavor/gb/forms.py
|
110
|
"""
GB-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.contrib.localflavor.gb.gb_regions import GB_NATIONS_CHOICES, GB_REGION_CHOICES
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class GBPostcodeField(CharField):
"""
A form field that validates its input is a UK postcode.
The regular expression used is sourced from the schema for British Standard
BS7666 address types: http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd
The value is uppercased and a space added in the correct place, if required.
"""
default_error_messages = {
'invalid': _('Enter a valid postcode.'),
}
outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
space_regex = re.compile(r' *(%s)$' % incode_pattern)
def clean(self, value):
value = super(GBPostcodeField, self).clean(value)
if value == '':
return value
postcode = value.upper().strip()
# Put a single space before the incode (second part).
postcode = self.space_regex.sub(r' \1', postcode)
if not self.postcode_regex.search(postcode):
raise ValidationError(self.error_messages['invalid'])
return postcode
class GBCountySelect(Select):
"""
A Select widget that uses a list of UK Counties/Regions as its choices.
"""
def __init__(self, attrs=None):
super(GBCountySelect, self).__init__(attrs, choices=GB_REGION_CHOICES)
class GBNationSelect(Select):
"""
A Select widget that uses a list of UK Nations as its choices.
"""
def __init__(self, attrs=None):
super(GBNationSelect, self).__init__(attrs, choices=GB_NATIONS_CHOICES)
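# --------------------------------------------------------------------------
# Standalone sketch (added for clarity; not part of the original module).
# It exercises the class-level regexes defined above to show how a postcode
# is normalised: uppercase, then a single space inserted before the incode.
if __name__ == "__main__":
    raw = 'sw1a1aa'
    candidate = GBPostcodeField.space_regex.sub(r' \1', raw.upper().strip())
    assert candidate == 'SW1A 1AA'
    assert GBPostcodeField.postcode_regex.search(candidate) is not None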
|
JioCloud/nova
|
refs/heads/master
|
nova/tests/unit/objects/test_fixed_ip.py
|
46
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import mock
import netaddr
from oslo_utils import timeutils
from nova import exception
from nova.objects import fixed_ip
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_network
from nova.tests.unit.objects import test_objects
fake_fixed_ip = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'address': '192.168.1.100',
'network_id': None,
'virtual_interface_id': None,
'instance_uuid': None,
'allocated': False,
'leased': False,
'reserved': False,
'host': None,
'network': None,
'virtual_interface': None,
'floating_ips': [],
}
class _TestFixedIPObject(object):
def _compare(self, obj, db_obj):
for field in obj.fields:
if field in ('default_route', 'floating_ips'):
continue
if field in fixed_ip.FIXED_IP_OPTIONAL_ATTRS:
if obj.obj_attr_is_set(field) and db_obj[field] is not None:
obj_val = obj[field].uuid
db_val = db_obj[field]['uuid']
else:
continue
else:
obj_val = obj[field]
db_val = db_obj[field]
if isinstance(obj_val, netaddr.IPAddress):
obj_val = str(obj_val)
self.assertEqual(db_val, obj_val)
@mock.patch('nova.db.fixed_ip_get')
def test_get_by_id(self, get):
get.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123)
get.assert_called_once_with(self.context, 123, get_network=False)
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.network_get')
def test_get_by_id_with_extras(self, network_get, fixed_get):
db_fixed = dict(fake_fixed_ip,
network=test_network.fake_network)
fixed_get.return_value = db_fixed
fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123,
expected_attrs=['network'])
fixed_get.assert_called_once_with(self.context, 123, get_network=True)
self._compare(fixedip, db_fixed)
self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
self.assertFalse(network_get.called)
@mock.patch('nova.db.fixed_ip_get_by_address')
def test_get_by_address(self, get):
get.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4')
get.assert_called_once_with(self.context, '1.2.3.4',
columns_to_join=[])
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get')
def test_get_by_address_with_extras(self, instance_get, network_get,
fixed_get):
db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
instance=fake_instance.fake_db_instance())
fixed_get.return_value = db_fixed
fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
expected_attrs=['network',
'instance'])
fixed_get.assert_called_once_with(self.context, '1.2.3.4',
columns_to_join=['network',
'instance'])
self._compare(fixedip, db_fixed)
self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
self.assertEqual(db_fixed['instance']['uuid'], fixedip.instance.uuid)
self.assertFalse(network_get.called)
self.assertFalse(instance_get.called)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get')
def test_get_by_address_with_extras_deleted_instance(self, instance_get,
network_get,
fixed_get):
db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
instance=None)
fixed_get.return_value = db_fixed
fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
expected_attrs=['network',
'instance'])
fixed_get.assert_called_once_with(self.context, '1.2.3.4',
columns_to_join=['network',
'instance'])
self._compare(fixedip, db_fixed)
self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
self.assertIsNone(fixedip.instance)
self.assertFalse(network_get.called)
self.assertFalse(instance_get.called)
@mock.patch('nova.db.fixed_ip_get_by_floating_address')
def test_get_by_floating_address(self, get):
get.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
'1.2.3.4')
get.assert_called_once_with(self.context, '1.2.3.4')
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get_by_floating_address')
def test_get_by_floating_address_none(self, get):
get.return_value = None
fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
'1.2.3.4')
get.assert_called_once_with(self.context, '1.2.3.4')
self.assertIsNone(fixedip)
@mock.patch('nova.db.fixed_ip_get_by_network_host')
def test_get_by_network_and_host(self, get):
get.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.get_by_network_and_host(self.context,
123, 'host')
get.assert_called_once_with(self.context, 123, 'host')
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_associate')
def test_associate(self, associate):
associate.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4',
'fake-uuid')
associate.assert_called_with(self.context, '1.2.3.4', 'fake-uuid',
network_id=None, reserved=False)
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_associate_pool')
def test_associate_pool(self, associate):
associate.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.associate_pool(self.context, 123,
'fake-uuid', 'host')
associate.assert_called_with(self.context, 123,
instance_uuid='fake-uuid',
host='host')
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_disassociate')
def test_disassociate_by_address(self, disassociate):
fixed_ip.FixedIP.disassociate_by_address(self.context, '1.2.3.4')
disassociate.assert_called_with(self.context, '1.2.3.4')
@mock.patch('nova.db.fixed_ip_disassociate_all_by_timeout')
def test_disassociate_all_by_timeout(self, disassociate):
now = timeutils.utcnow()
now_tz = timeutils.parse_isotime(
timeutils.isotime(now)).replace(
tzinfo=iso8601.iso8601.Utc())
disassociate.return_value = 123
result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context,
'host', now)
self.assertEqual(123, result)
# NOTE(danms): be pedantic about timezone stuff
args, kwargs = disassociate.call_args_list[0]
self.assertEqual(now_tz, args[2])
self.assertEqual((self.context, 'host'), args[:2])
self.assertEqual({}, kwargs)
@mock.patch('nova.db.fixed_ip_create')
def test_create(self, create):
create.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4')
fixedip.create()
create.assert_called_once_with(
self.context, {'address': '1.2.3.4'})
self._compare(fixedip, fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_update')
def test_save(self, update):
update.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
instance_uuid='fake-uuid')
self.assertRaises(exception.ObjectActionError, fixedip.save)
fixedip.obj_reset_changes(['address'])
fixedip.save()
update.assert_called_once_with(self.context, '1.2.3.4',
{'instance_uuid': 'fake-uuid'})
@mock.patch('nova.db.fixed_ip_disassociate')
def test_disassociate(self, disassociate):
fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
instance_uuid='fake-uuid')
fixedip.obj_reset_changes()
fixedip.disassociate()
disassociate.assert_called_once_with(self.context, '1.2.3.4')
self.assertIsNone(fixedip.instance_uuid)
@mock.patch('nova.db.fixed_ip_get_all')
def test_get_all(self, get_all):
get_all.return_value = [fake_fixed_ip]
fixedips = fixed_ip.FixedIPList.get_all(self.context)
self.assertEqual(1, len(fixedips))
get_all.assert_called_once_with(self.context)
self._compare(fixedips[0], fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_get_by_instance(self, get):
get.return_value = [fake_fixed_ip]
fixedips = fixed_ip.FixedIPList.get_by_instance_uuid(self.context,
'fake-uuid')
self.assertEqual(1, len(fixedips))
get.assert_called_once_with(self.context, 'fake-uuid')
self._compare(fixedips[0], fake_fixed_ip)
@mock.patch('nova.db.fixed_ip_get_by_host')
def test_get_by_host(self, get):
get.return_value = [fake_fixed_ip]
fixedips = fixed_ip.FixedIPList.get_by_host(self.context, 'host')
self.assertEqual(1, len(fixedips))
get.assert_called_once_with(self.context, 'host')
self._compare(fixedips[0], fake_fixed_ip)
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_by_virtual_interface_id(self, get):
get.return_value = [fake_fixed_ip]
fixedips = fixed_ip.FixedIPList.get_by_virtual_interface_id(
self.context, 123)
self.assertEqual(1, len(fixedips))
get.assert_called_once_with(self.context, 123)
self._compare(fixedips[0], fake_fixed_ip)
def test_floating_ips_do_not_lazy_load(self):
fixedip = fixed_ip.FixedIP()
self.assertRaises(NotImplementedError, lambda: fixedip.floating_ips)
@mock.patch('nova.db.fixed_ip_bulk_create')
def test_bulk_create(self, bulk):
fixed_ips = [fixed_ip.FixedIP(address='192.168.1.1'),
fixed_ip.FixedIP(address='192.168.1.2')]
fixed_ip.FixedIPList.bulk_create(self.context, fixed_ips)
bulk.assert_called_once_with(self.context,
[{'address': '192.168.1.1'},
{'address': '192.168.1.2'}])
@mock.patch('nova.db.network_get_associated_fixed_ips')
def test_get_by_network(self, get):
info = {'address': '1.2.3.4',
'instance_uuid': 'fake-uuid',
'network_id': 0,
'vif_id': 1,
'vif_address': 'de:ad:be:ee:f0:00',
'instance_hostname': 'fake-host',
'instance_updated': datetime.datetime(1955, 11, 5),
'instance_created': datetime.datetime(1955, 11, 5),
'allocated': True,
'leased': True,
'default_route': True,
}
get.return_value = [info]
fixed_ips = fixed_ip.FixedIPList.get_by_network(
self.context, {'id': 0}, host='fake-host')
get.assert_called_once_with(self.context, 0, host='fake-host')
self.assertEqual(1, len(fixed_ips))
fip = fixed_ips[0]
self.assertEqual('1.2.3.4', str(fip.address))
self.assertEqual('fake-uuid', fip.instance_uuid)
self.assertEqual(0, fip.network_id)
self.assertEqual(1, fip.virtual_interface_id)
self.assertTrue(fip.allocated)
self.assertTrue(fip.leased)
self.assertEqual('fake-uuid', fip.instance.uuid)
self.assertEqual('fake-host', fip.instance.hostname)
self.assertIsInstance(fip.instance.created_at, datetime.datetime)
self.assertIsInstance(fip.instance.updated_at, datetime.datetime)
self.assertEqual(1, fip.virtual_interface.id)
self.assertEqual(info['vif_address'], fip.virtual_interface.address)
@mock.patch('nova.db.network_get_associated_fixed_ips')
def test_backport_default_route(self, mock_get):
info = {'address': '1.2.3.4',
'instance_uuid': 'fake-uuid',
'network_id': 0,
'vif_id': 1,
'vif_address': 'de:ad:be:ee:f0:00',
'instance_hostname': 'fake-host',
'instance_updated': datetime.datetime(1955, 11, 5),
'instance_created': datetime.datetime(1955, 11, 5),
'allocated': True,
'leased': True,
'default_route': True,
}
mock_get.return_value = [info]
fixed_ips = fixed_ip.FixedIPList.get_by_network(
self.context, {'id': 0}, host='fake-host')
primitive = fixed_ips[0].obj_to_primitive()
self.assertIn('default_route', primitive['nova_object.data'])
fixed_ips[0].obj_make_compatible(primitive['nova_object.data'], '1.1')
self.assertNotIn('default_route', primitive['nova_object.data'])
class TestFixedIPObject(test_objects._LocalTest,
_TestFixedIPObject):
pass
class TestRemoteFixedIPObject(test_objects._RemoteTest,
_TestFixedIPObject):
pass
|
pavlenko-volodymyr/codingmood
|
refs/heads/master
|
codemood/commits/migrations/0010_auto__add_field_commit_messages.py
|
2
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Commit.messages'
db.add_column(u'commits_commit', 'messages',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Commit.messages'
db.delete_column(u'commits_commit', 'messages')
models = {
u'commits.commit': {
'Meta': {'object_name': 'Commit'},
'code_rate': ('django.db.models.fields.FloatField', [], {}),
'commit_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'messages': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'prev_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'commits.repository': {
'Meta': {'object_name': 'Repository'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['commits']
|
thaumos/ansible-modules-extras
|
refs/heads/devel
|
cloud/cloudstack/cs_snapshot_policy.py
|
41
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_snapshot_policy
short_description: Manages volume snapshot policies on Apache CloudStack based clouds.
description:
- Create, update and delete volume snapshot policies.
version_added: '2.2'
author: "René Moser (@resmo)"
options:
volume:
description:
- Name of the volume.
required: true
interval_type:
description:
- Interval of the snapshot.
required: false
default: 'daily'
choices: [ 'hourly', 'daily', 'weekly', 'monthly' ]
aliases: [ 'interval' ]
max_snaps:
description:
- Max number of snapshots.
required: false
default: 8
aliases: [ 'max' ]
schedule:
description:
- Time the snapshot is scheduled. Required if C(state=present).
- 'Format for C(interval_type=HOURLY): C(MM)'
- 'Format for C(interval_type=DAILY): C(MM:HH)'
- 'Format for C(interval_type=WEEKLY): C(MM:HH:DD (1-7))'
- 'Format for C(interval_type=MONTHLY): C(MM:HH:DD (1-28))'
required: false
default: null
time_zone:
description:
- Specifies a timezone for this command.
required: false
default: 'UTC'
aliases: [ 'timezone' ]
state:
description:
- State of the snapshot policy.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the volume is related to.
required: false
default: null
account:
description:
- Account the volume is related to.
required: false
default: null
project:
description:
- Name of the project the volume is related to.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure a snapshot policy daily at 1h00 UTC
- local_action:
module: cs_snapshot_policy
volume: ROOT-478
schedule: '00:1'
max_snaps: 3
# Ensure a snapshot policy hourly at minute 5 UTC
- local_action:
module: cs_snapshot_policy
volume: ROOT-478
schedule: '5'
interval_type: hourly
max_snaps: 1
# Ensure a snapshot policy weekly on Sunday at 05h00, TZ Europe/Zurich
- local_action:
module: cs_snapshot_policy
volume: ROOT-478
schedule: '00:5:1'
interval_type: weekly
max_snaps: 1
time_zone: 'Europe/Zurich'
# Ensure a snapshot policy is absent
- local_action:
module: cs_snapshot_policy
volume: ROOT-478
interval_type: hourly
state: absent
'''
RETURN = '''
---
id:
description: UUID of the snapshot policy.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
interval_type:
description: interval type of the snapshot policy.
returned: success
type: string
sample: daily
schedule:
description: schedule of the snapshot policy.
returned: success
type: string
sample:
max_snaps:
description: maximum number of snapshots retained.
returned: success
type: int
sample: 10
time_zone:
description: the time zone of the snapshot policy.
returned: success
type: string
sample: Etc/UTC
volume:
description: the volume of the snapshot policy.
returned: success
type: string
sample: ROOT-478
zone:
description: Name of zone the volume is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the volume is related to.
returned: success
type: string
sample: Production
account:
description: Account the volume is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the volume is related to.
returned: success
type: string
sample: example domain
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackSnapshotPolicy(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackSnapshotPolicy, self).__init__(module)
self.returns = {
'schedule': 'schedule',
'timezone': 'time_zone',
'maxsnaps': 'max_snaps',
}
self.interval_types = {
'hourly': 0,
'daily': 1,
'weekly': 2,
'monthly': 3,
}
self.volume = None
def get_interval_type(self):
interval_type = self.module.params.get('interval_type')
return self.interval_types[interval_type]
def get_volume(self, key=None):
if self.volume:
return self._get_by_key(key, self.volume)
args = {
'name': self.module.params.get('volume'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
volumes = self.cs.listVolumes(**args)
if volumes:
self.volume = volumes['volume'][0]
return self._get_by_key(key, self.volume)
return None
def get_snapshot_policy(self):
args = {
'volumeid': self.get_volume(key='id')
}
policies = self.cs.listSnapshotPolicies(**args)
if policies:
for policy in policies['snapshotpolicy']:
if policy['intervaltype'] == self.get_interval_type():
return policy
return None
def present_snapshot_policy(self):
required_params = [
'schedule',
]
self.module.fail_on_missing_params(required_params=required_params)
policy = self.get_snapshot_policy()
args = {
'intervaltype': self.module.params.get('interval_type'),
'schedule': self.module.params.get('schedule'),
'maxsnaps': self.module.params.get('max_snaps'),
'timezone': self.module.params.get('time_zone'),
'volumeid': self.get_volume(key='id')
}
if not policy or (policy and self.has_changed(policy, args)):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.createSnapshotPolicy(**args)
policy = res['snapshotpolicy']
if 'errortext' in policy:
self.module.fail_json(msg="Failed: '%s'" % policy['errortext'])
return policy
def absent_snapshot_policy(self):
policy = self.get_snapshot_policy()
if policy:
self.result['changed'] = True
args = {
'id': policy['id']
}
if not self.module.check_mode:
res = self.cs.deleteSnapshotPolicies(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % policy['errortext'])
return policy
def get_result(self, policy):
super(AnsibleCloudStackSnapshotPolicy, self).get_result(policy)
if policy and 'intervaltype' in policy:
for key, value in self.interval_types.items():
if value == policy['intervaltype']:
self.result['interval_type'] = key
break
volume = self.get_volume()
if volume:
volume_results = {
'volume': volume.get('name'),
'zone': volume.get('zonename'),
'project': volume.get('project'),
'account': volume.get('account'),
'domain': volume.get('domain'),
}
self.result.update(volume_results)
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
volume=dict(required=True),
interval_type=dict(default='daily', choices=['hourly', 'daily', 'weekly', 'monthly'], aliases=['interval']),
schedule=dict(default=None),
time_zone=dict(default='UTC', aliases=['timezone']),
max_snaps=dict(type='int', default=8, aliases=['max']),
state=dict(choices=['present', 'absent'], default='present'),
domain=dict(default=None),
account=dict(default=None),
project=dict(default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_snapshot_policy = AnsibleCloudStackSnapshotPolicy(module)
state = module.params.get('state')
if state in ['absent']:
policy = acs_snapshot_policy.absent_snapshot_policy()
else:
policy = acs_snapshot_policy.present_snapshot_policy()
result = acs_snapshot_policy.get_result(policy)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
tomfotherby/ansible-modules-core
|
refs/heads/devel
|
network/basics/slurp.py
|
134
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: slurp
version_added: historical
short_description: Slurps a file from remote nodes
description:
- This module works like M(fetch). It is used for fetching a base64-
encoded blob containing the data in a remote file.
options:
src:
description:
- The file on the remote system to fetch. This I(must) be a file, not a
directory.
required: true
default: null
aliases: []
notes:
- "See also: M(fetch)"
requirements: []
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
ansible host -m slurp -a 'src=/tmp/xx'
host | success >> {
"content": "aGVsbG8gQW5zaWJsZSB3b3JsZAo=",
"encoding": "base64"
}
'''
import base64
def main():
module = AnsibleModule(
argument_spec = dict(
src = dict(required=True, aliases=['path']),
),
supports_check_mode=True
)
source = os.path.expanduser(module.params['src'])
if not os.path.exists(source):
module.fail_json(msg="file not found: %s" % source)
if not os.access(source, os.R_OK):
module.fail_json(msg="file is not readable: %s" % source)
data = base64.b64encode(file(source).read())
module.exit_json(content=data, source=source, encoding='base64')
# import module snippets
from ansible.module_utils.basic import *
main()
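# --------------------------------------------------------------------------
# Illustrative consumer-side sketch (added for clarity; not part of the
# original module): decoding the base64 payload that slurp returns. The
# result dict below is hypothetical and mirrors the EXAMPLES section above.
#
#   import base64
#   slurp_result = {"content": "aGVsbG8gQW5zaWJsZSB3b3JsZAo=",
#                   "encoding": "base64"}
#   if slurp_result["encoding"] == "base64":
#       text = base64.b64decode(slurp_result["content"])
#       # text == "hello Ansible world\n"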
|
mikewiebe-ansible/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/digital_ocean/digital_ocean_sshkey_info.py
|
21
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: digital_ocean_sshkey_info
short_description: Gather information about DigitalOcean SSH keys
description:
- This module can be used to gather information about DigitalOcean SSH keys.
- This module replaces the C(digital_ocean_sshkey_facts) module.
version_added: "2.9"
author: "Patrick Marques (@pmarques)"
extends_documentation_fragment: digital_ocean.documentation
notes:
- Version 2 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- digital_ocean_sshkey_info:
oauth_token: "{{ my_do_key }}"
register: ssh_keys
- set_fact:
pubkey: "{{ item.public_key }}"
loop: "{{ ssh_keys.data|json_query(ssh_pubkey) }}"
vars:
ssh_pubkey: "[?name=='ansible_ctrl']"
- debug:
msg: "{{ pubkey }}"
'''
RETURN = '''
# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#list-all-keys
data:
description: List of SSH keys on DigitalOcean
returned: success and no resource constraint
type: dict
sample: [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "My SSH Public Key"
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
def core(module):
rest = DigitalOceanHelper(module)
response = rest.get("account/keys")
status_code = response.status_code
json = response.json
if status_code == 200:
module.exit_json(changed=False, data=json['ssh_keys'])
else:
module.fail_json(msg='Error fetching SSH Key information [{0}: {1}]'.format(
status_code, response.json['message']))
def main():
module = AnsibleModule(
argument_spec=DigitalOceanHelper.digital_ocean_argument_spec(),
supports_check_mode=True,
)
core(module)
if __name__ == '__main__':
main()
|
del680202/MachineLearning-memo
|
refs/heads/master
|
src/pla/pla-example.py
|
1
|
#!/usr/bin/env python
# encoding: utf-8
import matplotlib.pyplot as plt
import numpy as np
# Dataset found online; it is linearly separable
dataset = np.array([
((1, -0.4, 0.3), -1),
((1, -0.3, -0.1), -1),
((1, -0.2, 0.4), -1),
((1, -0.1, 0.1), -1),
((1, 0.9, -0.5), 1),
((1, 0.7, -0.9), 1),
((1, 0.8, 0.2), 1),
((1, 0.4, -0.6), 1)])
# Check whether any sample is misclassified and print the error rate
def check_error(w, dataset):
result = None
error = 0
for x, s in dataset:
x = np.array(x)
if int(np.sign(w.T.dot(x))) != s:
result = x, s
error += 1
print "error=%s/%s" % (error, len(dataset))
return result
# PLA (Perceptron Learning Algorithm) implementation
def pla(dataset):
w = np.zeros(3)
    result = check_error(w, dataset)
    while result is not None:
        x, s = result
        w += s * x
        result = check_error(w, dataset)
return w
# Run the PLA
w = pla(dataset)
# Plot the result
ps = [v[0] for v in dataset]
fig = plt.figure()
ax1 = fig.add_subplot(111)
# The first and second halves of the dataset are already grouped by class, so plot them directly
ax1.scatter([v[1] for v in ps[:4]], [v[2] for v in ps[:4]], s=10, c='b', marker="o", label='O')
ax1.scatter([v[1] for v in ps[4:]], [v[2] for v in ps[4:]], s=10, c='r', marker="x", label='X')
l = np.linspace(-2,2)
a,b = -w[1]/w[2], -w[0]/w[2]
ax1.plot(l, a*l + b, 'b-')
plt.legend(loc='upper left');
plt.show()
|
jasonkying/pip
|
refs/heads/develop
|
tests/unit/test_unit_outdated.py
|
26
|
import sys
import datetime
import os
from contextlib import contextmanager
import freezegun
import pytest
import pretend
from pip._vendor import lockfile
from pip.utils import outdated
@pytest.mark.parametrize(
['stored_time', 'newver', 'check', 'warn'],
[
('1970-01-01T10:00:00Z', '2.0', True, True),
('1970-01-01T10:00:00Z', '1.0', True, False),
('1970-01-06T10:00:00Z', '1.0', False, False),
('1970-01-06T10:00:00Z', '2.0', False, True),
]
)
def test_pip_version_check(monkeypatch, stored_time, newver, check, warn):
monkeypatch.setattr(outdated, 'get_installed_version', lambda name: '1.0')
resp = pretend.stub(
raise_for_status=pretend.call_recorder(lambda: None),
json=pretend.call_recorder(lambda: {"releases": {newver: {}}}),
)
session = pretend.stub(
get=pretend.call_recorder(lambda u, headers=None: resp),
)
fake_state = pretend.stub(
state={"last_check": stored_time, 'pypi_version': '1.0'},
save=pretend.call_recorder(lambda v, t: None),
)
monkeypatch.setattr(
outdated, 'load_selfcheck_statefile', lambda: fake_state
)
monkeypatch.setattr(outdated.logger, 'warning',
pretend.call_recorder(lambda s: None))
monkeypatch.setattr(outdated.logger, 'debug',
pretend.call_recorder(lambda s, exc_info=None: None))
with freezegun.freeze_time(
"1970-01-09 10:00:00",
ignore=[
"six.moves",
"pip._vendor.six.moves",
"pip._vendor.requests.packages.urllib3.packages.six.moves",
]):
outdated.pip_version_check(session)
assert not outdated.logger.debug.calls
if check:
assert session.get.calls == [pretend.call(
"https://pypi.python.org/pypi/pip/json",
headers={"Accept": "application/json"}
)]
assert fake_state.save.calls == [
pretend.call(newver, datetime.datetime(1970, 1, 9, 10, 00, 00)),
]
if warn:
assert len(outdated.logger.warning.calls) == 1
else:
assert len(outdated.logger.warning.calls) == 0
else:
assert session.get.calls == []
assert fake_state.save.calls == []
def test_virtualenv_state(monkeypatch):
CONTENT = '{"last_check": "1970-01-02T11:00:00Z", "pypi_version": "1.0"}'
fake_file = pretend.stub(
read=pretend.call_recorder(lambda: CONTENT),
write=pretend.call_recorder(lambda s: None),
)
@pretend.call_recorder
@contextmanager
def fake_open(filename, mode='r'):
yield fake_file
monkeypatch.setattr(outdated, 'open', fake_open, raising=False)
monkeypatch.setattr(outdated, 'running_under_virtualenv',
pretend.call_recorder(lambda: True))
monkeypatch.setattr(sys, 'prefix', 'virtually_env')
state = outdated.load_selfcheck_statefile()
state.save('2.0', datetime.datetime.utcnow())
assert len(outdated.running_under_virtualenv.calls) == 1
expected_path = os.path.join('virtually_env', 'pip-selfcheck.json')
assert fake_open.calls == [
pretend.call(expected_path),
pretend.call(expected_path, 'w'),
]
# json.dumps will call this a number of times
assert len(fake_file.write.calls)
def test_global_state(monkeypatch):
CONTENT = '''{"pip_prefix": {"last_check": "1970-01-02T11:00:00Z",
"pypi_version": "1.0"}}'''
fake_file = pretend.stub(
read=pretend.call_recorder(lambda: CONTENT),
write=pretend.call_recorder(lambda s: None),
)
@pretend.call_recorder
@contextmanager
def fake_open(filename, mode='r'):
yield fake_file
monkeypatch.setattr(outdated, 'open', fake_open, raising=False)
@pretend.call_recorder
@contextmanager
def fake_lock(filename):
yield
monkeypatch.setattr(outdated, "check_path_owner", lambda p: True)
monkeypatch.setattr(lockfile, 'LockFile', fake_lock)
monkeypatch.setattr(os.path, "exists", lambda p: True)
monkeypatch.setattr(outdated, 'running_under_virtualenv',
pretend.call_recorder(lambda: False))
monkeypatch.setattr(outdated, 'USER_CACHE_DIR', 'cache_dir')
monkeypatch.setattr(sys, 'prefix', 'pip_prefix')
state = outdated.load_selfcheck_statefile()
state.save('2.0', datetime.datetime.utcnow())
assert len(outdated.running_under_virtualenv.calls) == 1
expected_path = os.path.join('cache_dir', 'selfcheck.json')
assert fake_lock.calls == [pretend.call(expected_path)]
assert fake_open.calls == [
pretend.call(expected_path),
pretend.call(expected_path),
pretend.call(expected_path, 'w'),
]
# json.dumps will call this a number of times
assert len(fake_file.write.calls)
|
KronoSKoderS/CalPack
|
refs/heads/prod
|
tests/test_IntFields.py
|
1
|
import unittest
import ctypes
from calpack import models
class Test_IntField(unittest.TestCase):
def setUp(self):
class two_int_field_packet(models.Packet):
int_field = models.IntField()
int_field_signed = models.IntField(signed=True)
self.two_int_field_packet = two_int_field_packet
def test_intfield_set_valid_values(self):
"""
This test verifies that setting an integer field of a packet is done
        correctly. This also verifies that the internal c structure is properly
        set up.
"""
p = self.two_int_field_packet()
v1 = 100
v2 = -100
p.int_field = v1
p.int_field_signed = v2
self.assertEqual(p.int_field, v1)
self.assertEqual(p._Packet__c_pkt.int_field, v1)
self.assertEqual(p.int_field_signed, v2)
self.assertEqual(p._Packet__c_pkt.int_field_signed, v2)
def test_intfield_raises_TypeError_when_setting_non_int_value(self):
"""
This test verifies that a "TypeError" is raised when setting a value
other than an integer for the IntField. The following types are checked
* String
* Float
* list
"""
p = self.two_int_field_packet()
with self.assertRaises(TypeError):
p.int_field = ""
with self.assertRaises(TypeError):
p.int_field_signed = 3.14
with self.assertRaises(TypeError):
p.int_field = [0] * 12
def test_intfield_raises_TypeError_when_setting_signed_to_nonsigned(self):
"""
        This test verifies that a "TypeError" is raised when assigning a
        signed (negative) value to a non-signed field.
"""
p = self.two_int_field_packet()
with self.assertRaises(TypeError):
p.int_field = -123
def test_intfield_set_valid_value_from_other_field(self):
"""
This test verifies that setting an integer field from another field is
done properly and doesn't change the field type.
"""
p = self.two_int_field_packet()
p2 = self.two_int_field_packet()
v1 = 123
p.int_field = v1
p2.int_field = p.int_field
self.assertEqual(p.int_field, p2.int_field)
self.assertEqual(p2._Packet__c_pkt.int_field, v1)
def test_intfield_with_variable_bit_length(self):
"""
This test verifies that setting an integer value of variable size is
correctly exported to the to_bytes function. This also tests the
ability to set a value for the packet upon instantiation.
"""
class int_packet_with_varied_sized_int_fields(models.Packet):
int_field = models.IntField()
int_field_signed = models.IntField(signed=True)
int_field_4_bits = models.IntField16(bit_len=4)
int_field_12_bits = models.IntField16(bit_len=12)
pkt = int_packet_with_varied_sized_int_fields(
int_field=0xbeef,
int_field_signed=0xdead,
int_field_4_bits=0xa,
int_field_12_bits=0xbc
)
class c_pkt_struct(ctypes.Structure):
_pack_ = 1
_fields_ = (
('int_field', ctypes.c_uint),
('int_field_signed', ctypes.c_int),
('int_field_4_bits', ctypes.c_uint16, 4),
('int_field_12_bits', ctypes.c_uint16, 12),
)
c_pkt = c_pkt_struct()
c_pkt.int_field = 0xbeef
c_pkt.int_field_signed = 0xdead
c_pkt.int_field_4_bits = 0xa
c_pkt.int_field_12_bits = 0xbc
b_str = ctypes.string_at(ctypes.addressof(c_pkt), ctypes.sizeof(c_pkt))
self.assertEqual(b_str, pkt.to_bytes())
def test_intfield_raises_ValueError_with_invalid_bit_len(self):
with self.assertRaises(ValueError):
int_field = models.IntField(bit_len=0)
with self.assertRaises(ValueError):
int_field = models.IntField(bit_len=-1)
with self.assertRaises(ValueError):
int_field = models.IntField(bit_len=65)
def test_intfield_multi_int_fields(self):
class all_int_fields(models.Packet):
int_field = models.IntField()
int_field8 = models.IntField8()
int_field16 = models.IntField16()
int_field32 = models.IntField32()
int_field64 = models.IntField64()
pkt = all_int_fields()
pkt.int_field = 1
pkt.int_field8 = 2
pkt.int_field16 = 3
pkt.int_field32 = 4
pkt.int_field64 = 5
self.assertEqual(pkt.int_field, 1)
self.assertEqual(pkt._Packet__c_pkt.int_field, 1)
self.assertEqual(pkt.int_field8, 2)
self.assertEqual(pkt._Packet__c_pkt.int_field8, 2)
self.assertEqual(pkt.int_field16, 3)
self.assertEqual(pkt._Packet__c_pkt.int_field16, 3)
self.assertEqual(pkt.int_field32, 4)
self.assertEqual(pkt._Packet__c_pkt.int_field32, 4)
self.assertEqual(pkt.int_field64, 5)
self.assertEqual(pkt._Packet__c_pkt.int_field64, 5)
if __name__ == '__main__':
unittest.main()
|
stewnorriss/letsencrypt
|
refs/heads/master
|
acme/acme/other.py
|
14
|
"""Other ACME objects."""
import functools
import logging
import os
from acme import jose
logger = logging.getLogger(__name__)
class Signature(jose.JSONObjectWithFields):
"""ACME signature.
:ivar .JWASignature alg: Signature algorithm.
:ivar bytes sig: Signature.
:ivar bytes nonce: Nonce.
:ivar .JWK jwk: JWK.
"""
NONCE_SIZE = 16
"""Minimum size of nonce in bytes."""
alg = jose.Field('alg', decoder=jose.JWASignature.from_json)
sig = jose.Field('sig', encoder=jose.encode_b64jose,
decoder=jose.decode_b64jose)
nonce = jose.Field(
'nonce', encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=NONCE_SIZE, minimum=True))
jwk = jose.Field('jwk', decoder=jose.JWK.from_json)
@classmethod
def from_msg(cls, msg, key, nonce=None, nonce_size=None, alg=jose.RS256):
"""Create signature with nonce prepended to the message.
:param bytes msg: Message to be signed.
:param key: Key used for signing.
    :type key: `cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey`
(optionally wrapped in `.ComparableRSAKey`).
:param bytes nonce: Nonce to be used. If None, nonce of
``nonce_size`` will be randomly generated.
:param int nonce_size: Size of the automatically generated nonce.
Defaults to :const:`NONCE_SIZE`.
:param .JWASignature alg:
"""
nonce_size = cls.NONCE_SIZE if nonce_size is None else nonce_size
nonce = os.urandom(nonce_size) if nonce is None else nonce
msg_with_nonce = nonce + msg
        sig = alg.sign(key, msg_with_nonce)
logger.debug('%r signed as %r', msg_with_nonce, sig)
return cls(alg=alg, sig=sig, nonce=nonce,
jwk=alg.kty(key=key.public_key()))
def verify(self, msg):
"""Verify the signature.
:param bytes msg: Message that was used in signing.
"""
# self.alg is not Field, but JWA | pylint: disable=no-member
return self.alg.verify(self.jwk.key, self.nonce + msg, self.sig)
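# Illustrative sketch (added for clarity, not part of the original module): how
# Signature.from_msg and Signature.verify pair up. The key argument is assumed
# to be an RSA private key accepted by the chosen JWA algorithm; the message is
# an arbitrary example value.
def _example_sign_and_verify(key, message=b'example message'):
    # Sign with a freshly generated nonce, then check that verification passes.
    signature = Signature.from_msg(message, key)
    return signature.verify(message)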
|
shiyemin/shadowsocks
|
refs/heads/master
|
shadowsocks/udprelay.py
|
924
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
BUF_SIZE = 65536
def client_key(source_addr, server_af):
# notice this is server af, not dest af
return '%s:%s:%d' % (source_addr[0], source_addr[1], server_af)
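# Illustrative sketch (added, not part of the original relay): the plaintext
# shadowsocks UDP request body described in the diagrams above, i.e.
# ATYP | DST.ADDR | DST.PORT | DATA, as it looks before encrypt_all() is applied.
def _example_plain_udp_request(dest_addr, dest_port, payload):
    # pack_addr() emits the ATYP byte plus the address; the port is big-endian.
    return pack_addr(dest_addr) + struct.pack('>H', dest_port) + payload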
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
self._is_local = is_local
self._cache = lru_cache.LRUCache(timeout=config['timeout'],
close_callback=self._close_client)
self._client_fd_to_server_addr = \
lru_cache.LRUCache(timeout=config['timeout'])
self._dns_cache = lru_cache.LRUCache(timeout=300)
self._eventloop = None
self._closed = False
self._sockets = set()
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
server = self._server_socket
data, r_addr = server.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_server: data is empty')
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if self._is_local:
frag = common.ord(data[2])
if frag != 0:
logging.warn('drop a message since frag is not 0')
return
else:
data = data[3:]
else:
data = encrypt.encrypt_all(self._password, self._method, 0, data)
# decrypt data
if not data:
logging.debug('UDP handle_server: data is empty after decrypt')
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
if self._is_local:
server_addr, server_port = self._get_a_server()
else:
server_addr, server_port = dest_addr, dest_port
addrs = self._dns_cache.get(server_addr, None)
if addrs is None:
addrs = socket.getaddrinfo(server_addr, server_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if not addrs:
# drop
return
else:
self._dns_cache[server_addr] = addrs
af, socktype, proto, canonname, sa = addrs[0]
key = client_key(r_addr, af)
client = self._cache.get(key, None)
if not client:
# TODO async getaddrinfo
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
logging.debug('IP %s is in forbidden list, drop' %
common.to_str(sa[0]))
# drop
return
client = socket.socket(af, socktype, proto)
client.setblocking(False)
self._cache[key] = client
self._client_fd_to_server_addr[client.fileno()] = r_addr
self._sockets.add(client.fileno())
self._eventloop.add(client, eventloop.POLL_IN, self)
if self._is_local:
data = encrypt.encrypt_all(self._password, self._method, 1, data)
if not data:
return
else:
data = data[header_length:]
if not data:
return
try:
client.sendto(data, (server_addr, server_port))
except IOError as e:
err = eventloop.errno_from_exception(e)
if err in (errno.EINPROGRESS, errno.EAGAIN):
pass
else:
shell.print_exception(e)
def _handle_client(self, sock):
data, r_addr = sock.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_client: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if not self._is_local:
addrlen = len(r_addr[0])
if addrlen > 255:
# drop
return
data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
response = encrypt.encrypt_all(self._password, self._method, 1,
data)
if not response:
return
else:
data = encrypt.encrypt_all(self._password, self._method, 0,
data)
if not data:
return
header_result = parse_header(data)
if header_result is None:
return
# addrtype, dest_addr, dest_port, header_length = header_result
response = b'\x00\x00\x00' + data
client_addr = self._client_fd_to_server_addr.get(sock.fileno())
if client_addr:
self._server_socket.sendto(response, client_addr)
else:
            # this packet is from an address we have no record of,
            # so simply drop it
pass
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
|
FabriceSalvaire/PyDVI
|
refs/heads/master
|
PyDvi/Font/PkFont.py
|
1
|
####################################################################################################
#
# PyDvi - A Python Library to Process DVI Stream
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
__all__ = ['PkFont']
####################################################################################################
from ..Tools.Logging import print_card
from .Font import Font, font_types
from .PkFontParser import PkFontParser
####################################################################################################
class PkFont(Font):
"""This class implements the packed font type in the font manager.
To create a packed font instance use::
font = PkFont(font_manager, font_id, name)
where *font_manager* is a :class:`PyDvi.FontManager.FontManager` instance, *font_id* is the font
id provided by the font manager and *name* is the font name, "cmr10" for example. The packed
font file is parsed using a :class:`PyDvi.PkFontParser.PkFontParser` instance.
"""
font_type = font_types.Pk
font_type_string = 'TeX Packed Font'
extension = 'pk'
##############################################
def __init__(self, font_manager, font_id, name):
super(PkFont, self).__init__(font_manager, font_id, name)
self._glyphs = {}
PkFontParser.parse(self)
##############################################
def __getitem__(self, char_code):
""" Return the :class:`PyDvi.PkGlyph.PkGlyph` instance for the char code *char_code*. """
return self._glyphs[char_code]
##############################################
def __len__(self):
""" Return the number of glyphs in the font. """
return len(self._glyphs)
##############################################
def _find_font(self):
""" Find the font file location in the system using Kpathsea and build if it is not
already done.
"""
super(PkFont, self)._find_font(kpsewhich_options='-mktex=pk')
##############################################
def _set_preambule_data(self,
pk_id,
comment,
design_font_size,
checksum,
horizontal_dpi,
vertical_dpi):
""" Set the preambule data from the Packed Font Parser. """
self.pk_id = pk_id
self.comment = comment
self.design_font_size = design_font_size
self.checksum = checksum
self.horizontal_dpi = horizontal_dpi
self.vertical_dpi = vertical_dpi
##############################################
def register_glyph(self, glyph):
self._glyphs[glyph.char_code] = glyph
##############################################
def get_glyph(self, glyph_index, size=None, resolution=None):
return self._glyphs[glyph_index]
##############################################
def print_summary(self):
string_format = """
Preambule
- PK ID %u
- Comment '%s'
- Design size %.1f pt
- Checksum %u
- Resolution
- Horizontal %.1f dpi
- Vertical %.1f dpi """
message = self.print_header() + string_format % (
self.pk_id,
self.comment,
self.design_font_size,
self.checksum,
self.horizontal_dpi,
self.vertical_dpi,
)
print_card(message)
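# Illustrative sketch (added, not part of PyDvi): the typical way a PkFont is
# built and queried once a FontManager is available. The font name "cmr10" and
# the character code are arbitrary example values.
def _example_glyph_lookup(font_manager, font_id):
    font = PkFont(font_manager, font_id, 'cmr10')
    # __getitem__/get_glyph both return the PkGlyph for a char code.
    return font.get_glyph(ord('A'))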
####################################################################################################
#
# End
#
####################################################################################################
|
AutorestCI/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-billing/azure/mgmt/billing/models/billing_period_paged.py
|
3
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class BillingPeriodPaged(Paged):
"""
A paging container for iterating over a list of BillingPeriod object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[BillingPeriod]'}
}
def __init__(self, *args, **kwargs):
super(BillingPeriodPaged, self).__init__(*args, **kwargs)
|
elkingtonmcb/scikit-learn
|
refs/heads/master
|
benchmarks/bench_multilabel_metrics.py
|
276
|
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
    density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
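# Illustrative sketch (added): a minimal call of benchmark() with the default
# metrics and formats; the sizes below are arbitrary small values chosen only
# to keep the example fast.
def _example_benchmark_run():
    # Returns an array shaped (metrics, formats, samples, classes, density).
    return benchmark(samples=100, classes=4, density=.2, n_times=1)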
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
|
cjaymes/pyscap
|
refs/heads/master
|
src/scap/model/xhtml/DlTag.py
|
1
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.xhtml import *
from scap.Model import Model
logger = logging.getLogger(__name__)
class DlTag(Model):
MODEL_MAP = {
'elements': [
{'tag_name': 'dt', 'list': '_elements', 'class': 'LiTag', 'max': None},
{'tag_name': 'dd', 'list': '_elements', 'class': 'LiTag', 'max': None},
],
'attributes': {},
}
MODEL_MAP['attributes'].update(ATTRIBUTE_GROUP_attrs)
|
cmajames/py_touhou
|
refs/heads/master
|
pytouhou/games/sample/shots.py
|
1
|
# -*- encoding: utf-8 -*-
##
## Copyright (C) 2014 Emmanuel Gil Peyrot <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
from math import radians
from pytouhou.formats.exe import SHT, Shot
player = SHT()
player.horizontal_vertical_speed = 2.
player.horizontal_vertical_focused_speed = 1.5
player.diagonal_speed = 1.5
player.diagonal_focused_speed = 1.
shot = Shot()
shot.interval = 10
shot.delay = 5
shot.pos = (0, -32)
shot.hitbox = (5, 5)
shot.angle = radians(-90)
shot.speed = 5.
shot.damage = 16
shot.orb = 0
shot.type = 2
shot.sprite = 64
shot.unknown1 = 0
# Dict of list of shots, each for one power level.
# Always define at least the shot for max power, usually 999.
player.shots[999] = [shot]
# List of (unfocused, focused) shot types.
characters = [(player, player)]
|
qibingbuso/pyew
|
refs/heads/master
|
batch_antidebug.py
|
16
|
#!/usr/bin/python
import os
import sys
import time
import hashlib
from pyew_core import CPyew
def printData(pyew, path, msg):
buf = pyew.getBuffer()
print "File :", path
print "MD5 :", hashlib.md5(buf).hexdigest()
print "SHA1 :", hashlib.sha1(buf).hexdigest()
print "SHA256:", hashlib.sha256(buf).hexdigest()
print "Found :", msg
def checkAntidebug(path):
t = time.time()
pyew = CPyew(batch=True)
pyew.codeanalysis = True
try:
pyew.loadFile(path)
except KeyboardInterrupt:
print "Abort"
sys.exit(0)
except:
print "ERROR loading file %s" % path
return
if pyew.format not in ["PE", "ELF"]:
return
if len(pyew.antidebug) > 0:
print
printData(pyew, path, pyew.antidebug)
print "Time to analyze %f" % (time.time() - t)
print
def doChecks(path):
checkAntidebug(path)
def main(path):
buf = ""
for root, dirs, files in os.walk(path):
for x in files:
filepath = os.path.join(root, x)
sys.stdout.write("\b"*len(buf) + " "*len(buf) + "\b"*len(buf))
buf = "Analyzing file %s ..." % filepath
sys.stdout.write(buf)
sys.stdout.flush()
doChecks(filepath)
print
def usage():
print "Usage:", sys.argv[0], "<path>"
if __name__ == "__main__":
if len(sys.argv) == 1:
usage()
else:
main(sys.argv[1])
|
Ichag/odoo
|
refs/heads/8.0
|
addons/account/wizard/account_move_line_select.py
|
385
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_move_line_select(osv.osv_memory):
"""
Account move line select
"""
_name = "account.move.line.select"
_description = "Account move line select"
def open_window(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
account_obj = self.pool.get('account.account')
fiscalyear_obj = self.pool.get('account.fiscalyear')
if context is None:
context = {}
if 'fiscalyear' not in context:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
fiscalyear_ids = [context['fiscalyear']]
fiscalyears = fiscalyear_obj.browse(cr, uid, fiscalyear_ids, context=context)
period_ids = []
if fiscalyears:
for fiscalyear in fiscalyears:
for period in fiscalyear.period_ids:
period_ids.append(period.id)
domain = str(('period_id', 'in', period_ids))
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_move_line_tree1')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id])[0]
result['context'] = {
'fiscalyear': False,
'account_id': context['active_id'],
'active_id': context['active_id'],
}
if context['active_id']:
acc_data = account_obj.browse(cr, uid, context['active_id']).child_consol_ids
if acc_data:
result['context'].update({'consolidate_children': True})
result['domain']=result['domain'][0:-1]+','+domain+result['domain'][-1]
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bdh1011/cupeye
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/__init__.py
|
47
|
# postgresql/__init__.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, psycopg2, pg8000, pypostgresql, zxjdbc, psycopg2cffi
base.dialect = psycopg2.dialect
from .base import \
INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
INET, CIDR, UUID, BIT, MACADDR, OID, DOUBLE_PRECISION, TIMESTAMP, TIME, \
DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect, array, Any, All, \
TSVECTOR, DropEnumType
from .constraints import ExcludeConstraint
from .hstore import HSTORE, hstore
from .json import JSON, JSONElement, JSONB
from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
TSTZRANGE
__all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR', 'OID',
'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN',
'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'Any', 'All', 'array', 'HSTORE',
'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'JSONElement',
'DropEnumType'
)
|
jhjguxin/blogserver
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/localflavor/au/forms.py
|
309
|
"""
Australian-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
PHONE_DIGITS_RE = re.compile(r'^(\d{10})$')
class AUPostCodeField(RegexField):
"""Australian post code field."""
default_error_messages = {
'invalid': _('Enter a 4 digit post code.'),
}
def __init__(self, *args, **kwargs):
super(AUPostCodeField, self).__init__(r'^\d{4}$',
max_length=None, min_length=None, *args, **kwargs)
class AUPhoneNumberField(Field):
"""Australian phone number field."""
default_error_messages = {
'invalid': u'Phone numbers must contain 10 digits.',
}
def clean(self, value):
"""
Validate a phone number. Strips parentheses, whitespace and hyphens.
"""
super(AUPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub('(\(|\)|\s+|-)', '', smart_unicode(value))
phone_match = PHONE_DIGITS_RE.search(value)
if phone_match:
return u'%s' % phone_match.group(1)
raise ValidationError(self.error_messages['invalid'])
class AUStateSelect(Select):
"""
A Select widget that uses a list of Australian states/territories as its
choices.
"""
def __init__(self, attrs=None):
from au_states import STATE_CHOICES
super(AUStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
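# Illustrative sketch (added, not part of the original module): how
# AUPhoneNumberField.clean() normalises input; the number below is made up.
def _example_phone_clean():
    field = AUPhoneNumberField()
    # u'(02) 1234 5678' -> u'0212345678' once parentheses, spaces and hyphens
    # are stripped and the 10-digit check passes.
    return field.clean(u'(02) 1234 5678')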
|
petercable/xray
|
refs/heads/master
|
xray/test/test_combine.py
|
1
|
from copy import deepcopy
import numpy as np
import pandas as pd
from xray import Dataset, DataArray, auto_combine, concat
from xray.core.pycompat import iteritems, OrderedDict
from . import TestCase, InaccessibleArray, requires_dask
from .test_dataset import create_test_data
class TestConcatDataset(TestCase):
def test_concat(self):
# TODO: simplify and split this test case
# drop the third dimension to keep things relatively understandable
data = create_test_data().drop('dim3')
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
self.assertDatasetIdentical(data, concat(split_data, 'dim1'))
def rectify_dim_order(dataset):
            # return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset(dict((k, v.transpose(*data[k].dims))
for k, v in iteritems(dataset.data_vars)),
dataset.coords, attrs=dataset.attrs)
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=False)]
self.assertDatasetIdentical(data, concat(datasets, dim))
self.assertDatasetIdentical(
data, concat(datasets, data[dim]))
self.assertDatasetIdentical(
data, concat(datasets, data[dim], coords='minimal'))
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
concat_over = [k for k, v in iteritems(data.coords)
if dim in v.dims and k != dim]
actual = concat(datasets, data[dim], coords=concat_over)
self.assertDatasetIdentical(data, rectify_dim_order(actual))
actual = concat(datasets, data[dim], coords='different')
self.assertDatasetIdentical(data, rectify_dim_order(actual))
# make sure the coords argument behaves as expected
data.coords['extra'] = ('dim4', np.arange(3))
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
actual = concat(datasets, data[dim], coords='all')
expected = np.array([data['extra'].values
for _ in range(data.dims[dim])])
self.assertArrayEqual(actual['extra'].values, expected)
actual = concat(datasets, data[dim], coords='different')
self.assertDataArrayEqual(data['extra'], actual['extra'])
actual = concat(datasets, data[dim], coords='minimal')
self.assertDataArrayEqual(data['extra'], actual['extra'])
# verify that the dim argument takes precedence over
# concatenating dataset variables of the same name
dim = (2 * data['dim1']).rename('dim1')
datasets = [g for _, g in data.groupby('dim1', squeeze=False)]
expected = data.copy()
expected['dim1'] = dim
self.assertDatasetIdentical(expected, concat(datasets, dim))
def test_concat_data_vars(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
objs = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
for data_vars in ['minimal', 'different', 'all', [], ['foo']]:
actual = concat(objs, dim='x', data_vars=data_vars)
self.assertDatasetIdentical(data, actual)
def test_concat_coords(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
expected = data.assign_coords(c=('x', [0] * 5 + [1] * 5))
objs = [data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1)]
for coords in ['different', 'all', ['c']]:
actual = concat(objs, dim='x', coords=coords)
self.assertDatasetIdentical(expected, actual)
for coords in ['minimal', []]:
with self.assertRaisesRegexp(ValueError, 'not equal across'):
concat(objs, dim='x', coords=coords)
def test_concat_constant_index(self):
# GH425
ds1 = Dataset({'foo': 1.5}, {'y': 1})
ds2 = Dataset({'foo': 2.5}, {'y': 1})
expected = Dataset({'foo': ('y', [1.5, 2.5]), 'y': [1, 1]})
for mode in ['different', 'all', ['foo']]:
actual = concat([ds1, ds2], 'y', data_vars=mode)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'not equal across datasets'):
concat([ds1, ds2], 'y', data_vars='minimal')
def test_concat_size0(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(0, 0)), data]
actual = concat(split_data, 'dim1')
self.assertDatasetIdentical(data, actual)
actual = concat(split_data[::-1], 'dim1')
self.assertDatasetIdentical(data, actual)
def test_concat_errors(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
with self.assertRaisesRegexp(ValueError, 'must supply at least one'):
concat([], 'dim1')
with self.assertRaisesRegexp(ValueError, 'are not coordinates'):
concat([data, data], 'new_dim', coords=['not_found'])
with self.assertRaisesRegexp(ValueError, 'global attributes not'):
data0, data1 = deepcopy(split_data)
data1.attrs['foo'] = 'bar'
concat([data0, data1], 'dim1', compat='identical')
self.assertDatasetIdentical(
data, concat([data0, data1], 'dim1', compat='equals'))
with self.assertRaisesRegexp(ValueError, 'encountered unexpected'):
data0, data1 = deepcopy(split_data)
data1['foo'] = ('bar', np.random.randn(10))
concat([data0, data1], 'dim1')
with self.assertRaisesRegexp(ValueError, 'not equal across datasets'):
data0, data1 = deepcopy(split_data)
data1['dim2'] = 2 * data1['dim2']
concat([data0, data1], 'dim1', coords='minimal')
with self.assertRaisesRegexp(ValueError, 'must be defined with 1-d'):
concat([data0, data1], 'dim1')
with self.assertRaisesRegexp(ValueError, 'compat.* invalid'):
concat(split_data, 'dim1', compat='foobar')
with self.assertRaisesRegexp(ValueError, 'unexpected value for'):
concat([data, data], 'new_dim', coords='foobar')
with self.assertRaisesRegexp(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({'x': [1]})], dim='z')
with self.assertRaisesRegexp(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({}, {'x': 1})], dim='z')
with self.assertRaisesRegexp(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', mode='different')
with self.assertRaisesRegexp(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', concat_over='different')
def test_concat_promote_shape(self):
# mixed dims within variables
objs = [Dataset({}, {'x': 0}), Dataset({'x': [1]})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]})
self.assertDatasetIdentical(actual, expected)
objs = [Dataset({'x': [0]}), Dataset({}, {'x': 1})]
actual = concat(objs, 'x')
self.assertDatasetIdentical(actual, expected)
# mixed dims between variables
objs = [Dataset({'x': [2], 'y': 3}), Dataset({'x': [4], 'y': 5})]
actual = concat(objs, 'x')
expected = Dataset({'x': [2, 4], 'y': ('x', [3, 5])})
self.assertDatasetIdentical(actual, expected)
# mixed dims in coord variable
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1]}, {'y': ('x', [-2])})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]}, {'y': ('x', [-1, -2])})
self.assertDatasetIdentical(actual, expected)
# scalars with mixed lengths along concat dim -- values should repeat
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1, 2]}, {'y': -2})]
actual = concat(objs, 'x')
expected = Dataset({}, {'y': ('x', [-1, -2, -2])})
self.assertDatasetIdentical(actual, expected)
# broadcast 1d x 1d -> 2d
objs = [Dataset({'z': ('x', [-1])}, {'x': [0], 'y': [0]}),
Dataset({'z': ('y', [1])}, {'x': [1], 'y': [0]})]
actual = concat(objs, 'x')
expected = Dataset({'z': (('x', 'y'), [[-1], [1]])})
self.assertDatasetIdentical(actual, expected)
def test_concat_do_not_promote(self):
# GH438
objs = [Dataset({'y': ('t', [1])}, {'x': 1}),
Dataset({'y': ('t', [2])}, {'x': 1})]
expected = Dataset({'y': ('t', [1, 2])}, {'x': 1, 't': [0, 0]})
actual = concat(objs, 't')
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'y': ('t', [1])}, {'x': 1}),
Dataset({'y': ('t', [2])}, {'x': 2})]
with self.assertRaises(ValueError):
concat(objs, 't', coords='minimal')
@requires_dask # only for toolz
def test_auto_combine(self):
objs = [Dataset({'x': [0]}), Dataset({'x': [1]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1]})
self.assertDatasetIdentical(expected, actual)
actual = auto_combine([actual])
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': [0, 1]}), Dataset({'x': [2]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1, 2]})
self.assertDatasetIdentical(expected, actual)
# ensure auto_combine handles non-sorted dimensions
objs = [Dataset(OrderedDict([('x', ('a', [0])), ('y', ('a', [0]))])),
Dataset(OrderedDict([('y', ('a', [1])), ('x', ('a', [1]))]))]
actual = auto_combine(objs)
expected = Dataset({'x': ('a', [0, 1]), 'y': ('a', [0, 1]), 'a': [0, 0]})
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'y': [1], 'x': [1]})]
with self.assertRaisesRegexp(ValueError, 'too many .* dimensions'):
auto_combine(objs)
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
with self.assertRaisesRegexp(ValueError, 'cannot infer dimension'):
auto_combine(objs)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'x': [0]})]
with self.assertRaises(KeyError):
auto_combine(objs)
class TestConcatDataArray(TestCase):
def test_concat(self):
ds = Dataset({'foo': (['x', 'y'], np.random.random((10, 20))),
'bar': (['x', 'y'], np.random.random((10, 20)))})
foo = ds['foo']
bar = ds['bar']
# from dataset array:
expected = DataArray(np.array([foo.values, bar.values]),
dims=['w', 'x', 'y'])
actual = concat([foo, bar], 'w')
self.assertDataArrayEqual(expected, actual)
# from iteration:
grouped = [g for _, g in foo.groupby('x')]
stacked = concat(grouped, ds['x'])
self.assertDataArrayIdentical(foo, stacked)
# with an index as the 'dim' argument
stacked = concat(grouped, ds.indexes['x'])
self.assertDataArrayIdentical(foo, stacked)
actual = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
self.assertDataArrayIdentical(expected, actual)
actual = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
self.assertDataArrayIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'not identical'):
concat([foo, bar], dim='w', compat='identical')
with self.assertRaisesRegexp(ValueError, 'not a valid argument'):
concat([foo, bar], dim='w', data_vars='minimal')
@requires_dask
def test_concat_lazy(self):
import dask.array as da
arrays = [DataArray(
da.from_array(InaccessibleArray(np.zeros((3, 3))), 3),
dims=['x', 'y']) for _ in range(2)]
# should not raise
combined = concat(arrays, dim='z')
self.assertEqual(combined.shape, (2, 3, 3))
self.assertEqual(combined.dims, ('z', 'x', 'y'))
|
sfcta/ChampClient
|
refs/heads/master
|
ChampClient.py
|
1
|
from time import localtime, strftime
import os, sys, random, time, subprocess, threading
import Pyro.core
def threaded(f):
def wrapper(*args):
t = threading.Thread(target=f, args=args)
t.start()
return wrapper
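# Illustrative sketch (added): any function wrapped with @threaded returns
# immediately and runs its body on a background thread when called. The
# function below exists only as an example.
@threaded
def _example_background_task(message):
    print "background task says:", message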
class HelpRequest(Pyro.core.ObjBase):
def __init__(self):
Pyro.core.ObjBase.__init__(self)
def help(self, dispatcher=None,single=False):
print "\r" + strftime("%x %X", localtime()) + " Request for help from:",dispatcher[10:]
SpawnAllHelpers(dispatcher,single)
# -------------------------------------------------------------------------
# This next function always spawns a new thread.
@threaded
def SpawnAllHelpers(dispatcher,single):
if (single):
SpawnHelper(dispatcher)
else:
for i in range(NUMCPU):
SpawnHelper(dispatcher)
# We wait a few secs to help distribute jobs across more machines
# before we distribute across multi-CPU's.
time.sleep(3)
# -------------------------------------------------------------------------
# This next function always spawns a new thread.
@threaded
def SpawnHelper(dispatcher):
# print "SpawnHelper"
runner = CPURunner.acquire() # block, until a CPU is free
# print "CPU Acquired"
# Now we have a CPU. Let's try and pull a Job.
try:
joblist = Pyro.core.getProxyForURI(dispatcher)
while True:
# Pull job from dispatcher
jobnum, job = joblist.get()
if job==None:
break
print "\n# " + strftime("%x %X", localtime()) + " Starting: ",job.cmd,"\n# ("+job.workdir+")"
rtncode, logname = RunJob(job,joblist,"source")
# Job completed. Notify dispatcher this one's finished.
print "\n# " + strftime("%x %X", localtime()) + " Finished:",job.cmd,"\n### "+job.workdir
joblist.alldone(jobnum, job, rtncode, logname)
# And, loop back and try to get another.
except: # Code drops to here when dispatcher daemon drops connection.
pass
# All done with this jobqueue, let's give the CPU back and exit.
CPURunner.release()
if (threading.activeCount() == 3): # 3 means no more jobs are using the CPU!
print "\n---------------------------------\nWaiting...",
return
def RunJob(job, joblist, source):
# Set up job attributes -- working dir, environment, killstatus
# Open logfile
logname = ""
log = ""
try:
logname = os.path.join(job.workdir,
"dispatch-"+str(random.randint(100000000,999999999))+".log")
log = open(logname, "w")
except:
# Couldn't cd to workdir, or couldn't open logfile. Die.
return 3, None
# Set up the environment
envvars = os.environ.copy()
for key,val in job.env.iteritems():
if key=="PATH_PREFIX":
envvars['PATH'] = val + envvars['PATH']
else:
envvars[key] = val
print "\n# Environment PATH:", envvars['PATH']
# Spawn the process
child = subprocess.Popen(job.cmd,cwd=job.workdir, env=envvars, shell=True,
stdout=log, stderr=log)
wait = 0
rtncode = None
while (rtncode == None):
try:
time.sleep(1)
rtncode = child.poll()
# Check for kill request
if (wait % 10 == 0):
wait = 0
if (joblist.killMe() == True):
print "Got Kill Request"
kill = subprocess.Popen("taskkill /F /T /PID %i" % child.pid, shell=True)
rtncode = 0
break
wait += 1
except:
print "Lost connection: Killing job!"
kill = subprocess.Popen("taskkill /F /T /PID %i" % child.pid, shell=True)
rtncode = 0
break
# Done! Close things out.
# Concatenate logfiles (bug in python.. drat!)
log.close()
log = open(logname, "r")
# Using a threadsafe lock function so that multiple threads can append output
# to the logfile without tripping on each other.
LOGGERLOCK.acquire()
logfile = open(os.path.join(job.workdir,LOGGERNAME),"a")
for line in log:
logfile.write(line)
logfile.write("======= Finished "+time.asctime()+" ==============================\n")
# Close out the logfiles and set to null so windows can delete them.
log.flush()
log.close()
logfile.flush()
logfile.close()
log=None
logfile=None
try:
os.remove(logname)
except:
pass # print sys.exc_info() sometimes Windows doesn't release the logfile... :-(
LOGGERLOCK.release()
return rtncode, logname
# ------------------------------------------------------------
# Empty class, just to hold a structure of public variables:
# Job: cmd, dir, env, cmdwithargs
class Job:
pass
# ------------------------------------------------------------
# Initialization!
if (__name__ == "__main__"):
LOGGERNAME = os.getenv("COMPUTERNAME")+".log"
LOGGERLOCK = threading.Lock()
NUMCPU = int(os.getenv("NUMBER_OF_PROCESSORS"))
if len(sys.argv) > 1:
NUMCPU = int(sys.argv[1])
CPURunner = threading.Semaphore(NUMCPU)
# --- Infrastructure now ready; let's fire up the Pyro listener.
try:
Pyro.core.initServer(banner=0)
daemon = Pyro.core.Daemon()
print "The daemon runs on port:",daemon.port
uri = daemon.connect(HelpRequest(),"help")
print "\nSF-CHAMP Dispatcher Client:",NUMCPU,"CPU\n---------------------------------"
print strftime("%x %X", localtime()) + " Waiting...",
daemon.requestLoop()
except:
print sys.exc_info()
|
quarkonics/zstack-woodpecker
|
refs/heads/master
|
zstackwoodpecker/zstackwoodpecker/header/vm.py
|
1
|
import apibinding.inventory as inventory
import zstackwoodpecker.header.header as zstack_header
RUNNING = inventory.RUNNING
STOPPED = inventory.STOPPED
DESTROYED = inventory.DESTROYED
EXPUNGED = 'EXPUNGED'
VOLUME_BANDWIDTH = 'volumeTotalBandwidth'
VOLUME_IOPS = 'volumeTotalIops'
NETWORK_OUTBOUND_BANDWIDTH = 'networkOutboundBandwidth'
class TestVm(zstack_header.ZstackObject):
def __init__(self):
self.vm = None
self.state = None
self.delete_policy = None
self.delete_delay_time = None
def __repr__(self):
if self.vm:
return '%s-%s' % (self.__class__.__name__, self.vm.uuid)
return '%s-None' % self.__class__.__name__
def create(self):
self.state = RUNNING
def destroy(self):
if self.delete_policy != zstack_header.DELETE_DIRECT:
self.state = DESTROYED
else:
self.state = EXPUNGED
def start(self):
self.state = RUNNING
def stop(self):
self.state = STOPPED
def reboot(self):
self.state = RUNNING
def migrate(self, target_host):
pass
def check(self):
pass
def expunge(self):
self.state = EXPUNGED
def get_vm(self):
return self.vm
def get_state(self):
return self.state
def set_state(self, state):
self.state = state
def update(self):
pass
def set_delete_policy(self, policy):
self.delete_policy = policy
def get_delete_policy(self):
return self.delete_policy
def set_delete_delay_time(self, time):
self.delete_delay_time = time
def get_delete_delay_time(self):
return self.delete_delay_time
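# Illustrative sketch (added, not part of the original header): the state
# transitions TestVm is meant to track; real test VMs subclass this and wire
# the calls to actual API operations.
def _example_lifecycle():
    vm = TestVm()
    vm.create()    # state -> RUNNING
    vm.stop()      # state -> STOPPED
    vm.start()     # state -> RUNNING
    vm.destroy()   # state -> DESTROYED (or EXPUNGED under a direct delete policy)
    return vm.get_state()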
|
Eivindbergman/Skrapa
|
refs/heads/master
|
libs/chardet/langturkishmodel.py
|
269
|
# -*- coding: utf-8 -*-
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Özgür Baskın - Turkish Language Model
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin5_TurkishCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255, 23, 37, 47, 39, 29, 52, 36, 45, 53, 60, 16, 49, 20, 46, 42,
48, 69, 44, 35, 31, 51, 38, 62, 65, 43, 56,255,255,255,255,255,
255, 1, 21, 28, 12, 2, 18, 27, 25, 3, 24, 10, 5, 13, 4, 15,
26, 64, 7, 8, 9, 14, 32, 57, 58, 11, 22,255,255,255,255,255,
180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165,
164,163,162,161,160,159,101,158,157,156,155,154,153,152,151,106,
150,149,148,147,146,145,144,100,143,142,141,140,139,138,137,136,
94, 80, 93,135,105,134,133, 63,132,131,130,129,128,127,126,125,
124,104, 73, 99, 79, 85,123, 54,122, 98, 92,121,120, 91,103,119,
68,118,117, 97,116,115, 50, 90,114,113,112,111, 55, 41, 40, 86,
89, 70, 59, 78, 71, 82, 88, 33, 77, 66, 84, 83,110, 75, 61, 96,
30, 67,109, 74, 87,102, 34, 95, 81,108, 76, 72, 17, 6, 19,107,
)
TurkishLangModel = (
3,2,3,3,3,1,3,3,3,3,3,3,3,3,2,1,1,3,3,1,3,3,0,3,3,3,3,3,0,3,1,3,
3,2,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1,
3,2,2,3,3,0,3,3,3,3,3,3,3,2,3,1,0,3,3,1,3,3,0,3,3,3,3,3,0,3,0,3,
3,1,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,0,1,0,1,
3,3,2,3,3,0,3,3,3,3,3,3,3,2,3,1,1,3,3,0,3,3,1,2,3,3,3,3,0,3,0,3,
3,1,1,0,0,0,1,0,0,0,0,1,1,0,1,2,1,0,0,0,1,0,0,0,0,2,0,0,0,0,0,1,
3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,1,3,3,2,0,3,2,1,2,2,1,3,3,0,0,0,2,
2,2,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,1,0,0,1,
3,3,3,2,3,3,1,2,3,3,3,3,3,3,3,1,3,2,1,0,3,2,0,1,2,3,3,2,1,0,0,2,
2,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0,0,0,
1,0,1,3,3,1,3,3,3,3,3,3,3,1,2,0,0,2,3,0,2,3,0,0,2,2,2,3,0,3,0,1,
2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,0,3,2,0,2,3,2,3,3,1,0,0,2,
3,2,0,0,1,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,1,1,1,0,2,0,0,1,
3,3,3,2,3,3,2,3,3,3,3,2,3,3,3,0,3,3,0,0,2,1,0,0,2,3,2,2,0,0,0,2,
2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,2,0,0,1,
3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,0,1,3,2,1,1,3,2,3,2,1,0,0,2,
2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,
3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,2,0,2,3,0,0,2,2,2,2,0,0,0,2,
3,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0,
3,3,3,3,3,3,3,2,2,2,2,3,2,3,3,0,3,3,1,1,2,2,0,0,2,2,3,2,0,0,1,3,
0,3,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,
3,3,3,2,3,3,3,2,1,2,2,3,2,3,3,0,3,2,0,0,1,1,0,1,1,2,1,2,0,0,0,1,
0,3,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0,
3,3,3,2,3,3,2,3,2,2,2,3,3,3,3,1,3,1,1,0,3,2,1,1,3,3,2,3,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,1,
3,2,2,3,3,0,3,3,3,3,3,3,3,2,2,1,0,3,3,1,3,3,0,1,3,3,2,3,0,3,0,3,
2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
2,2,2,3,3,0,3,3,3,3,3,3,3,3,3,0,0,3,2,0,3,3,0,3,2,3,3,3,0,3,1,3,
2,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1,
3,3,3,1,2,3,3,1,0,0,1,0,0,3,3,2,3,0,0,2,0,0,2,0,2,0,0,0,2,0,2,0,
0,3,1,0,1,0,0,0,2,2,1,0,1,1,2,1,2,2,2,0,2,1,1,0,0,0,2,0,0,0,0,0,
1,2,1,3,3,0,3,3,3,3,3,2,3,0,0,0,0,2,3,0,2,3,1,0,2,3,1,3,0,3,0,2,
3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,1,3,3,2,2,3,2,2,0,1,2,3,0,1,2,1,0,1,0,0,0,1,0,2,2,0,0,0,1,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,
3,3,3,1,3,3,1,1,3,3,1,1,3,3,1,0,2,1,2,0,2,1,0,0,1,1,2,1,0,0,0,2,
2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,1,0,2,1,3,0,0,2,0,0,3,3,0,3,0,0,1,0,1,2,0,0,1,1,2,2,0,1,0,
0,1,2,1,1,0,1,0,1,1,1,1,1,0,1,1,1,2,2,1,2,0,1,0,0,0,0,0,0,1,0,0,
3,3,3,2,3,2,3,3,0,2,2,2,3,3,3,0,3,0,0,0,2,2,0,1,2,1,1,1,0,0,0,1,
0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
3,3,3,3,3,3,2,1,2,2,3,3,3,3,2,0,2,0,0,0,2,2,0,0,2,1,3,3,0,0,1,1,
1,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,
1,1,2,3,3,0,3,3,3,3,3,3,2,2,0,2,0,2,3,2,3,2,2,2,2,2,2,2,1,3,2,3,
2,0,2,1,2,2,2,2,1,1,2,2,1,2,2,1,2,0,0,2,1,1,0,2,1,0,0,1,0,0,0,1,
2,3,3,1,1,1,0,1,1,1,2,3,2,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,2,2,1,3,3,3,0,2,1,2,0,2,1,0,0,1,1,1,1,1,0,0,1,
2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0,
3,3,3,2,3,3,3,3,3,2,3,1,2,3,3,1,2,0,0,0,0,0,0,0,3,2,1,1,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
3,3,3,2,2,3,3,2,1,1,1,1,1,3,3,0,3,1,0,0,1,1,0,0,3,1,2,1,0,0,0,0,
0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,
3,3,3,2,2,3,2,2,2,3,2,1,1,3,3,0,3,0,0,0,0,1,0,0,3,1,1,2,0,0,0,1,
1,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,3,3,0,3,3,3,3,3,2,2,2,1,2,0,2,1,2,2,1,1,0,1,2,2,2,2,2,2,2,
0,0,2,1,2,1,2,1,0,1,1,3,1,2,1,1,2,0,0,2,0,1,0,1,0,1,0,0,0,1,0,1,
3,3,3,1,3,3,3,0,1,1,0,2,2,3,1,0,3,0,0,0,1,0,0,0,1,0,0,1,0,1,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,2,2,1,0,0,1,0,0,3,3,1,3,0,0,1,1,0,2,0,3,0,0,0,2,0,1,1,
0,1,2,0,1,2,2,0,2,2,2,2,1,0,2,1,1,0,2,0,2,1,2,0,0,0,0,0,0,0,0,0,
3,3,3,1,3,2,3,2,0,2,2,2,1,3,2,0,2,1,2,0,1,2,0,0,1,0,2,2,0,0,0,2,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,
3,3,3,0,3,3,1,1,2,3,1,0,3,2,3,0,3,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,
1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,3,0,3,3,2,3,3,2,2,0,0,0,0,1,2,0,1,3,0,0,0,3,1,1,0,3,0,2,
2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,1,2,2,1,0,3,1,1,1,1,3,3,2,3,0,0,1,0,1,2,0,2,2,0,2,2,0,2,1,
0,2,2,1,1,1,1,0,2,1,1,0,1,1,1,1,2,1,2,1,2,0,1,0,1,0,0,0,0,0,0,0,
3,3,3,0,1,1,3,0,0,1,1,0,0,2,2,0,3,0,0,1,1,0,1,0,0,0,0,0,2,0,0,0,
0,3,1,0,1,0,1,0,2,0,0,1,0,1,0,1,1,1,2,1,1,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,0,2,0,2,0,1,1,1,0,0,3,3,0,2,0,0,1,0,0,2,1,1,0,1,0,1,0,1,0,
0,2,0,1,2,0,2,0,2,1,1,0,1,0,2,1,1,0,2,1,1,0,1,0,0,0,1,1,0,0,0,0,
3,2,3,0,1,0,0,0,0,0,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,0,2,0,0,0,
0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,2,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,0,0,2,3,0,0,1,0,1,0,2,3,2,3,0,0,1,3,0,2,1,0,0,0,0,2,0,1,0,
0,2,1,0,0,1,1,0,2,1,0,0,1,0,0,1,1,0,1,1,2,0,1,0,0,0,0,1,0,0,0,0,
3,2,2,0,0,1,1,0,0,0,0,0,0,3,1,1,1,0,0,0,0,0,1,0,0,0,0,0,2,0,1,0,
0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,3,3,0,2,3,2,2,1,2,2,1,1,2,0,1,3,2,2,2,0,0,2,2,0,0,0,1,2,1,
3,0,2,1,1,0,1,1,1,0,1,2,2,2,1,1,2,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,
0,1,1,2,3,0,3,3,3,2,2,2,2,1,0,1,0,1,0,1,2,2,0,0,2,2,1,3,1,1,2,1,
0,0,1,1,2,0,1,1,0,0,1,2,0,2,1,1,2,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0,
3,3,2,0,0,3,1,0,0,0,0,0,0,3,2,1,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0,
0,2,1,1,0,0,1,0,1,2,0,0,1,1,0,0,2,1,1,1,1,0,2,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,1,0,0,0,0,1,0,0,3,3,2,2,0,0,1,0,0,2,0,1,0,0,0,2,0,1,0,
0,0,1,1,0,0,2,0,2,1,0,0,1,1,2,1,2,0,2,1,2,1,1,1,0,0,1,1,0,0,0,0,
3,3,2,0,0,2,2,0,0,0,1,1,0,2,2,1,3,1,0,1,0,1,2,0,0,0,0,0,1,0,1,0,
0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,0,0,0,1,0,0,1,0,0,2,3,1,2,0,0,1,0,0,2,0,0,0,1,0,2,0,2,0,
0,1,1,2,2,1,2,0,2,1,1,0,0,1,1,0,1,1,1,1,2,1,1,0,0,0,0,0,0,0,0,0,
3,3,3,0,2,1,2,1,0,0,1,1,0,3,3,1,2,0,0,1,0,0,2,0,2,0,1,1,2,0,0,0,
0,0,1,1,1,1,2,0,1,1,0,1,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
3,3,3,0,2,2,3,2,0,0,1,0,0,2,3,1,0,0,0,0,0,0,2,0,2,0,0,0,2,0,0,0,
0,1,1,0,0,0,1,0,0,1,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,0,0,0,0,0,0,1,0,0,2,2,2,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0,
0,0,2,1,1,0,1,0,2,1,1,0,0,1,1,2,1,0,2,0,2,0,1,0,0,0,2,0,0,0,0,0,
0,0,0,2,2,0,2,1,1,1,1,2,2,0,0,1,0,1,0,0,1,3,0,0,0,0,1,0,0,2,1,0,
0,0,1,0,1,0,0,0,0,0,2,1,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
2,0,0,2,3,0,2,3,1,2,2,0,2,0,0,2,0,2,1,1,1,2,1,0,0,1,2,1,1,2,1,0,
1,0,2,0,1,0,1,1,0,0,2,2,1,2,1,1,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,0,2,1,2,0,0,0,1,0,0,3,2,0,1,0,0,1,0,0,2,0,0,0,1,2,1,0,1,0,
0,0,0,0,1,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,2,2,0,2,2,1,1,0,1,1,1,1,1,0,0,1,2,1,1,1,0,1,0,0,0,1,1,1,1,
0,0,2,1,0,1,1,1,0,1,1,2,1,2,1,1,2,0,1,1,2,1,0,2,0,0,0,0,0,0,0,0,
3,2,2,0,0,2,0,0,0,0,0,0,0,2,2,0,2,0,0,1,0,0,2,0,0,0,0,0,2,0,0,0,
0,2,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,0,2,2,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0,
2,0,1,0,1,0,1,1,0,0,1,2,0,1,0,1,1,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,
2,2,2,0,1,1,0,0,0,1,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,1,2,0,1,0,
0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,1,1,1,0,0,0,0,1,2,0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
1,1,2,0,1,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,
0,0,1,2,2,0,2,1,2,1,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,2,2,0,0,0,1,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin5TurkishModel = {
'char_to_order_map': Latin5_TurkishCharToOrderMap,
'precedence_matrix': TurkishLangModel,
'typical_positive_ratio': 0.970290,
'keep_english_letter': True,
'charset_name': "ISO-8859-9",
'language': 'Turkish',
}
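# Sketch of how this model is consumed (illustrative, not part of this data
# file; the 64x64 layout is inferred from the 4096-entry table above): each
# input byte is mapped to a frequency order, and pairs of frequent orders
# (< 64) are scored with the precedence matrix.
#
#   def pair_likelihood(prev_byte, cur_byte):
#       prev_order = Latin5_TurkishCharToOrderMap[prev_byte]
#       cur_order = Latin5_TurkishCharToOrderMap[cur_byte]
#       if prev_order < 64 and cur_order < 64:
#           return TurkishLangModel[prev_order * 64 + cur_order]
#       return None  # control chars, symbols and digits are not scored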
|
bixbydev/Bixby
|
refs/heads/master
|
google/gdata/tlslite/constants.py
|
279
|
"""Constants used in various places."""
class CertificateType:
x509 = 0
openpgp = 1
cryptoID = 2
class HandshakeType:
hello_request = 0
client_hello = 1
server_hello = 2
certificate = 11
server_key_exchange = 12
certificate_request = 13
server_hello_done = 14
certificate_verify = 15
client_key_exchange = 16
finished = 20
class ContentType:
change_cipher_spec = 20
alert = 21
handshake = 22
application_data = 23
all = (20,21,22,23)
class AlertLevel:
warning = 1
fatal = 2
class AlertDescription:
"""
@cvar bad_record_mac: A TLS record failed to decrypt properly.
If this occurs during a shared-key or SRP handshake it most likely
indicates a bad password. It may also indicate an implementation
error, or some tampering with the data in transit.
This alert will be signalled by the server if the SRP password is bad. It
may also be signalled by the server if the SRP username is unknown to the
server, but it doesn't wish to reveal that fact.
This alert will be signalled by the client if the shared-key username is
bad.
@cvar handshake_failure: A problem occurred while handshaking.
This typically indicates a lack of common ciphersuites between client and
server, or some other disagreement (about SRP parameters or key sizes,
for example).
@cvar protocol_version: The other party's SSL/TLS version was unacceptable.
This indicates that the client and server couldn't agree on which version
of SSL or TLS to use.
@cvar user_canceled: The handshake is being cancelled for some reason.
"""
close_notify = 0
unexpected_message = 10
bad_record_mac = 20
decryption_failed = 21
record_overflow = 22
decompression_failure = 30
handshake_failure = 40
no_certificate = 41 #SSLv3
bad_certificate = 42
unsupported_certificate = 43
certificate_revoked = 44
certificate_expired = 45
certificate_unknown = 46
illegal_parameter = 47
unknown_ca = 48
access_denied = 49
decode_error = 50
decrypt_error = 51
export_restriction = 60
protocol_version = 70
insufficient_security = 71
internal_error = 80
user_canceled = 90
no_renegotiation = 100
unknown_srp_username = 120
missing_srp_username = 121
untrusted_srp_parameters = 122
class CipherSuite:
TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0x0050
TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0x0053
TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0x0056
TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0x0051
TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0x0054
TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0x0057
TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A
TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
TLS_RSA_WITH_RC4_128_SHA = 0x0005
srpSuites = []
srpSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA)
srpSuites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA)
srpSuites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA)
def getSrpSuites(ciphers):
suites = []
for cipher in ciphers:
if cipher == "aes128":
suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_128_CBC_SHA)
elif cipher == "aes256":
suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_256_CBC_SHA)
elif cipher == "3des":
suites.append(CipherSuite.TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA)
return suites
getSrpSuites = staticmethod(getSrpSuites)
srpRsaSuites = []
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA)
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA)
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA)
def getSrpRsaSuites(ciphers):
suites = []
for cipher in ciphers:
if cipher == "aes128":
suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA)
elif cipher == "aes256":
suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA)
elif cipher == "3des":
suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA)
return suites
getSrpRsaSuites = staticmethod(getSrpRsaSuites)
rsaSuites = []
rsaSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA)
rsaSuites.append(TLS_RSA_WITH_AES_128_CBC_SHA)
rsaSuites.append(TLS_RSA_WITH_AES_256_CBC_SHA)
rsaSuites.append(TLS_RSA_WITH_RC4_128_SHA)
def getRsaSuites(ciphers):
suites = []
for cipher in ciphers:
if cipher == "aes128":
suites.append(CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA)
elif cipher == "aes256":
suites.append(CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA)
elif cipher == "rc4":
suites.append(CipherSuite.TLS_RSA_WITH_RC4_128_SHA)
elif cipher == "3des":
suites.append(CipherSuite.TLS_RSA_WITH_3DES_EDE_CBC_SHA)
return suites
getRsaSuites = staticmethod(getRsaSuites)
tripleDESSuites = []
tripleDESSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA)
tripleDESSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA)
tripleDESSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA)
aes128Suites = []
aes128Suites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA)
aes128Suites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA)
aes128Suites.append(TLS_RSA_WITH_AES_128_CBC_SHA)
aes256Suites = []
aes256Suites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA)
aes256Suites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA)
aes256Suites.append(TLS_RSA_WITH_AES_256_CBC_SHA)
rc4Suites = []
rc4Suites.append(TLS_RSA_WITH_RC4_128_SHA)
class Fault:
badUsername = 101
badPassword = 102
badA = 103
clientSrpFaults = range(101,104)
badVerifyMessage = 601
clientCertFaults = range(601,602)
badPremasterPadding = 501
shortPremasterSecret = 502
clientNoAuthFaults = range(501,503)
badIdentifier = 401
badSharedKey = 402
clientSharedKeyFaults = range(401,403)
badB = 201
serverFaults = range(201,202)
badFinished = 300
badMAC = 301
badPadding = 302
genericFaults = range(300,303)
faultAlerts = {\
badUsername: (AlertDescription.unknown_srp_username, \
AlertDescription.bad_record_mac),\
badPassword: (AlertDescription.bad_record_mac,),\
badA: (AlertDescription.illegal_parameter,),\
badIdentifier: (AlertDescription.handshake_failure,),\
badSharedKey: (AlertDescription.bad_record_mac,),\
badPremasterPadding: (AlertDescription.bad_record_mac,),\
shortPremasterSecret: (AlertDescription.bad_record_mac,),\
badVerifyMessage: (AlertDescription.decrypt_error,),\
badFinished: (AlertDescription.decrypt_error,),\
badMAC: (AlertDescription.bad_record_mac,),\
badPadding: (AlertDescription.bad_record_mac,)
}
faultNames = {\
badUsername: "bad username",\
badPassword: "bad password",\
badA: "bad A",\
badIdentifier: "bad identifier",\
badSharedKey: "bad sharedkey",\
badPremasterPadding: "bad premaster padding",\
shortPremasterSecret: "short premaster secret",\
badVerifyMessage: "bad verify message",\
badFinished: "bad finished message",\
badMAC: "bad MAC",\
badPadding: "bad padding"
}
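# Illustrative usage (not part of the original module):
#
#   suites = CipherSuite.getRsaSuites(["aes128", "aes256"])
#   # -> [TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA]
#   Fault.faultNames[Fault.badPadding]
#   # -> "bad padding"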
|
GitHublong/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/extras/csrf_migration_helper.py
|
59
|
#!/usr/bin/env python
# This script aims to help developers locate forms and view code that needs to
# use the new CSRF protection in Django 1.2. It tries to find all the code that
# may need the steps described in the CSRF documentation. It does not modify
# any code directly; it merely attempts to locate it. Developers should be
# aware of its limitations, described below.
#
# For each template that contains at least one POST form, the following info is printed:
#
# <Absolute path to template>
# AKA: <Aliases (relative to template directory/directories that contain it)>
# POST forms: <Number of POST forms>
# With token: <Number of POST forms with the CSRF token already added>
# Without token:
# <File name and line number of form without token>
#
# Searching for:
# <Template names that need to be searched for in view code
# (includes templates that 'include' current template)>
#
# Found:
# <File name and line number of any view code found>
#
# The format used allows this script to be used in Emacs grep mode:
# M-x grep
# Run grep (like this): /path/to/my/virtualenv/python /path/to/django/src/extras/csrf_migration_helper.py --settings=mysettings /path/to/my/srcs
# Limitations
# ===========
#
# - All templates must be stored on disk in '.html' or '.htm' files.
# (extensions configurable below)
#
# - All Python code must be stored on disk in '.py' files. (extensions
# configurable below)
#
# - All templates must be accessible from TEMPLATE_DIRS or from the 'templates/'
# directory in apps specified in INSTALLED_APPS. Non-file based template
# loaders are out of the picture, because there is no way to ask them to
# return all templates.
#
# - It's impossible to programmatically determine which forms should and should
# not have the token added. The developer must decide when to do this,
# ensuring that the token is only added to internally targeted forms.
#
# - It's impossible to programmatically work out when a template is used. The
# attempts to trace back to view functions are guesses, and could easily fail
# in the following ways:
#
# * If the 'include' template tag is used with a variable
# i.e. {% include tname %} where tname is a variable containing the actual
# template name, rather than {% include "my_template.html" %}.
#
# * If the template name has been built up by view code instead of as a simple
# string. For example, generic views and the admin both do this. (These
# apps are both contrib and both use RequestContext already, as it happens).
#
# * If the 'ssl' tag (or any template tag other than 'include') is used to
# include the template in another template.
#
# - All templates belonging to apps referenced in INSTALLED_APPS will be
# searched, which may include third party apps or Django contrib. In some
# cases, this will be a good thing, because even if the templates of these
# apps have been fixed by someone else, your own view code may reference the
# same template and may need to be updated.
#
# You may, however, wish to comment out some entries in INSTALLED_APPS or
# TEMPLATE_DIRS before running this script.
# Improvements to this script are welcome!
# Configuration
# =============
TEMPLATE_EXTENSIONS = [
".html",
".htm",
]
PYTHON_SOURCE_EXTENSIONS = [
".py",
]
TEMPLATE_ENCODING = "UTF-8"
PYTHON_ENCODING = "UTF-8"
# Method
# ======
# Find templates:
# - template dirs
# - installed apps
#
# Search for POST forms
# - Work out what the name of the template is, as it would appear in an
# 'include' or get_template() call. This can be done by comparing template
# filename to all template dirs. Some templates can have more than one
# 'name' e.g. if a directory and one of its child directories are both in
# TEMPLATE_DIRS. This is actually a common hack used for
# overriding-and-extending admin templates.
#
# For each POST form,
# - see if it already contains '{% csrf_token %}' immediately after <form>
# - work back to the view function(s):
# - First, see if the form is included in any other templates, then
# recursively compile a list of affected templates.
# - Find any code function that references that template. This is just a
# brute force text search that can easily return false positives
# and fail to find real instances.
import os
import sys
import re
from optparse import OptionParser
USAGE = """
This tool helps to locate forms that need CSRF tokens added and the
corresponding view code. This processing is NOT foolproof, and you should read
the help contained in the script itself. Also, this script may need configuring
(by editing the script) before use.
Usage:
python csrf_migration_helper.py [--settings=path.to.your.settings] /path/to/python/code [more paths...]
Paths can be specified as relative paths.
With no arguments, this help is printed.
"""
_POST_FORM_RE = \
re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
_FORM_CLOSE_RE = re.compile(r'</form\s*>')
_TOKEN_RE = re.compile('\{% csrf_token')
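# Quick illustration of what these patterns match (not part of the original
# script; the sample markup is hypothetical):
#
#   >>> bool(_POST_FORM_RE.search('<form action="/vote/" method="post">'))
#   True
#   >>> bool(_POST_FORM_RE.search('<form action="/search/" method="get">'))
#   False
#   >>> bool(_TOKEN_RE.search('{% csrf_token %}'))
#   True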
def get_template_dirs():
"""
Returns a set of all directories that contain project templates.
"""
from django.conf import settings
dirs = set()
if ('django.template.loaders.filesystem.load_template_source' in settings.TEMPLATE_LOADERS
or 'django.template.loaders.filesystem.Loader' in settings.TEMPLATE_LOADERS):
dirs.update(map(unicode, settings.TEMPLATE_DIRS))
if ('django.template.loaders.app_directories.load_template_source' in settings.TEMPLATE_LOADERS
or 'django.template.loaders.app_directories.Loader' in settings.TEMPLATE_LOADERS):
from django.template.loaders.app_directories import app_template_dirs
dirs.update(app_template_dirs)
return dirs
def make_template_info(filename, root_dirs):
"""
Creates a Template object for a filename, calculating the possible
relative_filenames from the supplied filename and root template directories
"""
return Template(filename,
[filename[len(d)+1:] for d in root_dirs if filename.startswith(d)])
class Template(object):
def __init__(self, absolute_filename, relative_filenames):
self.absolute_filename, self.relative_filenames = absolute_filename, relative_filenames
def content(self):
try:
return self._content
except AttributeError:
with open(self.absolute_filename) as fd:
try:
content = fd.read().decode(TEMPLATE_ENCODING)
except UnicodeDecodeError as e:
message = '%s in %s' % (
e[4], self.absolute_filename.encode('UTF-8', 'ignore'))
raise UnicodeDecodeError(*(e.args[:4] + (message,)))
self._content = content
return content
content = property(content)
def post_form_info(self):
"""
Get information about any POST forms in the template.
Returns [(linenumber, csrf_token added)]
"""
forms = {}
form_line = 0
for ln, line in enumerate(self.content.split("\n")):
if not form_line and _POST_FORM_RE.search(line):
# record the form with no CSRF token yet
form_line = ln + 1
forms[form_line] = False
if form_line and _TOKEN_RE.search(line):
# found the CSRF token
forms[form_line] = True
form_line = 0
if form_line and _FORM_CLOSE_RE.search(line):
# no token found by form closing tag
form_line = 0
return forms.items()
def includes_template(self, t):
"""
Returns true if this template includes template 't' (via {% include %})
"""
for r in t.relative_filenames:
if re.search(r'\{%\s*include\s+(\'|")' + re.escape(r) + r'(\1)\s*%\}', self.content):
return True
return False
def related_templates(self):
"""
Returns all templates that include this one, recursively. (starting
with this one)
"""
try:
return self._related_templates
except AttributeError:
pass
retval = set([self])
for t in self.all_templates:
if t.includes_template(self):
# If two templates mutually include each other, directly or
# indirectly, we have a problem here...
retval = retval.union(t.related_templates())
self._related_templates = retval
return retval
def __repr__(self):
return repr(self.absolute_filename)
def __eq__(self, other):
return self.absolute_filename == other.absolute_filename
def __hash__(self):
return hash(self.absolute_filename)
def get_templates(dirs):
"""
Returns all files in dirs that have template extensions, as Template
objects.
"""
templates = set()
for root in dirs:
for (dirpath, dirnames, filenames) in os.walk(root):
for f in filenames:
if len([True for e in TEMPLATE_EXTENSIONS if f.endswith(e)]) > 0:
t = make_template_info(os.path.join(dirpath, f), dirs)
# templates need to be able to search others:
t.all_templates = templates
templates.add(t)
return templates
def get_python_code(paths):
"""
Returns all Python code, as a list of tuples, each one being:
(filename, list of lines)
"""
retval = []
for p in paths:
if not os.path.isdir(p):
raise Exception("'%s' is not a directory." % p)
for (dirpath, dirnames, filenames) in os.walk(p):
for f in filenames:
if len([True for e in PYTHON_SOURCE_EXTENSIONS if f.endswith(e)]) > 0:
fn = os.path.join(dirpath, f)
with open(fn) as fd:
content = [l.decode(PYTHON_ENCODING) for l in fd.readlines()]
retval.append((fn, content))
return retval
def search_python_list(python_code, template_names):
"""
Searches python code for a list of template names.
Returns a list of tuples, each one being:
(filename, line number)
"""
retval = set()
for tn in template_names:
retval.update(search_python(python_code, tn))
return sorted(retval)
def search_python(python_code, template_name):
"""
Searches Python code for a template name.
Returns a list of tuples, each one being:
(filename, line number)
"""
retval = []
for fn, content in python_code:
for ln, line in enumerate(content):
if ((u'"%s"' % template_name) in line) or \
((u"'%s'" % template_name) in line):
retval.append((fn, ln + 1))
return retval
def main(pythonpaths):
template_dirs = get_template_dirs()
templates = get_templates(template_dirs)
python_code = get_python_code(pythonpaths)
for t in templates:
# Logic
form_matches = t.post_form_info()
num_post_forms = len(form_matches)
form_lines_without_token = [ln for (ln, has_token) in form_matches if not has_token]
if num_post_forms == 0:
continue
to_search = [rf for rt in t.related_templates() for rf in rt.relative_filenames]
found = search_python_list(python_code, to_search)
# Display:
print(t.absolute_filename)
for r in t.relative_filenames:
print(" AKA %s" % r)
print(" POST forms: %s" % num_post_forms)
print(" With token: %s" % (num_post_forms - len(form_lines_without_token)))
if form_lines_without_token:
print(" Without token:")
for ln in form_lines_without_token:
print("%s:%d:" % (t.absolute_filename, ln))
print('')
print(" Searching for:")
for r in to_search:
print(" " + r)
print('')
print(" Found:")
if len(found) == 0:
print(" Nothing")
else:
for fn, ln in found:
print("%s:%d:" % (fn, ln))
print('')
print("----")
parser = OptionParser(usage=USAGE)
parser.add_option("", "--settings", action="store", dest="settings", help="Dotted path to settings file")
if __name__ == '__main__':
options, args = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
settings = getattr(options, 'settings', None)
if settings is None:
if os.environ.get("DJANGO_SETTINGS_MODULE", None) is None:
print("You need to set DJANGO_SETTINGS_MODULE or use the '--settings' parameter")
sys.exit(1)
else:
os.environ["DJANGO_SETTINGS_MODULE"] = settings
main(args)
|
trishnaguha/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/csvfile.py
|
80
|
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: csvfile
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "1.5"
short_description: read data from a TSV or CSV file
description:
- The csvfile lookup reads the contents of a file in CSV (comma-separated value) format.
The lookup looks for the row where the first column matches keyname, and returns the value in the second column, unless a different column is specified.
options:
col:
description: column to return (0 index).
default: "1"
default:
description: what to return if the value is not found in the file.
default: ''
delimiter:
description: field separator in the file, for a tab you can specify "TAB" or "\t".
default: TAB
file:
description: name of the CSV/TSV file to open.
default: ansible.csv
encoding:
description: Encoding (character set) of the used CSV file.
default: utf-8
version_added: "2.1"
notes:
- The default is for TSV files (tab delimited), not CSV (comma delimited)... yes, the name is misleading.
"""
EXAMPLES = """
- name: Match 'Li' on the first column, return the second column (0 based index)
debug: msg="The atomic number of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=,') }}"
- name: Match 'Li' on the first column, but return the 3rd column (columns start counting after the match)
debug: msg="The atomic mass of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=, col=2') }}"
- name: Define Values From CSV File
set_fact:
loop_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=1') }}"
int_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=2') }}"
int_mask: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=3') }}"
int_name: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=4') }}"
local_as: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=5') }}"
neighbor_as: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=6') }}"
neigh_int_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=7') }}"
delegate_to: localhost
"""
RETURN = """
_raw:
description:
- value(s) stored in file column
"""
import codecs
import csv
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.six import PY2
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common._collections_compat import MutableSequence
class CSVRecoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding='utf-8'):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def __next__(self):
return next(self.reader).encode("utf-8")
next = __next__ # For Python 2
class CSVReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
if PY2:
f = CSVRecoder(f, encoding)
else:
f = codecs.getreader(encoding)(f)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def __next__(self):
row = next(self.reader)
return [to_text(s) for s in row]
next = __next__ # For Python 2
def __iter__(self):
return self
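# Illustrative sketch (not part of the plugin; the file name is hypothetical):
# CSVReader takes a binary file object and yields rows of text.
#
#   with open('elements.csv', 'rb') as f:
#       for row in CSVReader(f, delimiter=','):
#           print(row[0], row[1])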
class LookupModule(LookupBase):
def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1):
try:
f = open(filename, 'rb')
creader = CSVReader(f, delimiter=to_native(delimiter), encoding=encoding)
for row in creader:
if len(row) and row[0] == key:
return row[int(col)]
except Exception as e:
raise AnsibleError("csvfile: %s" % to_native(e))
return dflt
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
params = term.split()
key = params[0]
paramvals = {
'col': "1", # column to return
'default': None,
'delimiter': "TAB",
'file': 'ansible.csv',
'encoding': 'utf-8',
}
# parameters specified?
try:
for param in params[1:]:
name, value = param.split('=')
if name not in paramvals:
raise AnsibleAssertionError('%s not in paramvals' % name)
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if paramvals['delimiter'] == 'TAB':
paramvals['delimiter'] = "\t"
lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'])
if var is not None:
if isinstance(var, MutableSequence):
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
|
davidzchen/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/sparsemask_op_test.py
|
35
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SparseMaskTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
values = np.random.rand(4, 4).astype(np.single)
indices = np.array([0, 2, 3, 4], dtype=np.int32)
mask_indices = np.array([0], dtype=np.int32)
out_values = values[1:, :]
out_indices = np.array([2, 3, 4], dtype=np.int32)
with self.cached_session() as sess:
values_tensor = ops.convert_to_tensor(values)
indices_tensor = ops.convert_to_tensor(indices)
mask_indices_tensor = ops.convert_to_tensor(mask_indices)
t = ops.IndexedSlices(values_tensor, indices_tensor)
masked_t = array_ops.sparse_mask(t, mask_indices_tensor)
tf_out_values, tf_out_indices = sess.run(
[masked_t.values, masked_t.indices])
self.assertAllEqual(tf_out_values, out_values)
self.assertAllEqual(tf_out_indices, out_indices)
if __name__ == "__main__":
test.main()
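# Minimal sketch of the semantics exercised above (derived from this test,
# not from separate documentation): sparse_mask drops the slices whose
# indices appear in mask_indices.
#
#   t = ops.IndexedSlices(ops.convert_to_tensor(values),
#                         ops.convert_to_tensor(np.array([0, 2, 3, 4])))
#   masked = array_ops.sparse_mask(t, ops.convert_to_tensor(np.array([0])))
#   # masked.indices -> [2, 3, 4]; masked.values -> values[1:, :]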
|
CharlesShang/TFFRCNN
|
refs/heads/master
|
lib/datasets/kitti_tracking.py
|
3
|
import os
import PIL
import numpy as np
import scipy.sparse
import subprocess
import cPickle
import math
from .imdb import imdb
from .imdb import ROOT_DIR
from ..utils.cython_bbox import bbox_overlaps
from ..utils.boxes_grid import get_boxes_grid
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
from ..rpn_msr.generate_anchors import generate_anchors
# <<<< obsolete
class kitti_tracking(imdb):
def __init__(self, image_set, seq_name, kitti_tracking_path=None):
imdb.__init__(self, 'kitti_tracking_' + image_set + '_' + seq_name)
self._image_set = image_set
self._seq_name = seq_name
self._kitti_tracking_path = self._get_default_path() if kitti_tracking_path is None \
else kitti_tracking_path
self._data_path = os.path.join(self._kitti_tracking_path, image_set, 'image_02')
self._classes = ('__background__', 'Car', 'Pedestrian', 'Cyclist')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.png'
self._image_index = self._load_image_set_index()
# Default to roidb handler
if cfg.IS_RPN:
self._roidb_handler = self.gt_roidb
else:
self._roidb_handler = self.region_proposal_roidb
# num of subclasses
if image_set == 'training' and seq_name != 'trainval':
self._num_subclasses = 220 + 1
else:
self._num_subclasses = 472 + 1
# load the mapping from subclass to class
if image_set == 'training' and seq_name != 'trainval':
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'train', 'mapping.txt')
else:
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'trainval', 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.int)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = self._class_to_ind[words[1]]
self._subclass_mapping = mapping
self.config = {'top_k': 100000}
# statistics for computing recall
self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_proposal = 0
assert os.path.exists(self._kitti_tracking_path), \
'kitti_tracking path does not exist: {}'.format(self._kitti_tracking_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self.image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
kitti_train_nums = [154, 447, 233, 144, 314, 297, 270, 800, 390, 803, 294, \
373, 78, 340, 106, 376, 209, 145, 339, 1059, 837]
kitti_test_nums = [465, 147, 243, 257, 421, 809, 114, 215, 165, 349, 1176, \
774, 694, 152, 850, 701, 510, 305, 180, 404, 173, 203, \
436, 430, 316, 176, 170, 85, 175]
if self._seq_name == 'train' or self._seq_name == 'trainval':
assert self._image_set == 'training', 'Use train set or trainval set in testing'
if self._seq_name == 'train':
seq_index = [0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16]
else:
seq_index = range(0, 21)
# for each sequence
image_index = []
for i in xrange(len(seq_index)):
seq_idx = seq_index[i]
num = kitti_train_nums[seq_idx]
for j in xrange(num):
image_index.append('{:04d}/{:06d}'.format(seq_idx, j))
else:
# a single sequence
seq_num = int(self._seq_name)
if self._image_set == 'training':
num = kitti_train_nums[seq_num]
else:
num = kitti_test_nums[seq_num]
image_index = []
for i in xrange(num):
image_index.append('{:04d}/{:06d}'.format(seq_num, i))
return image_index
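# Example of the resulting identifiers (derived from the format strings
# above): sequence 0, frame 12 becomes '0000/000012', which is later joined
# with the image_02 data path and the '.png' extension to locate the frame.
#
#   '{:04d}/{:06d}'.format(0, 12)   # -> '0000/000012'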
def _get_default_path(self):
"""
Return the default path where kitti_tracking is expected to be installed.
"""
return os.path.join(ROOT_DIR, 'data', 'KITTI_Tracking')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
"""
cache_file = os.path.join(self.cache_path, self.name + '_' + cfg.SUBCLS_NAME + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_kitti_voxel_exemplar_annotation(index)
for index in self.image_index]
if cfg.IS_RPN:
# print out recall
for i in xrange(1, self.num_classes):
print '{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i])
print '{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i])
print '{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i]))
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def _load_kitti_voxel_exemplar_annotation(self, index):
"""
Load image and bounding boxes info from txt file in the KITTI voxel exemplar format.
"""
if self._image_set == 'training' and self._seq_name != 'trainval':
prefix = 'train'
elif self._image_set == 'training':
prefix = 'trainval'
else:
prefix = ''
if prefix == '':
lines = []
lines_flipped = []
else:
filename = os.path.join(self._kitti_tracking_path, cfg.SUBCLS_NAME, prefix, index + '.txt')
if os.path.exists(filename):
print filename
# the annotation file contains flipped objects
lines = []
lines_flipped = []
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[1])
is_flip = int(words[2])
if subcls != -1:
if is_flip == 0:
lines.append(line)
else:
lines_flipped.append(line)
else:
lines = []
lines_flipped = []
num_objs = len(lines)
# store information of flipped objects
assert (num_objs == len(lines_flipped)), 'The number of flipped objects is not the same!'
gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
for ix, line in enumerate(lines_flipped):
words = line.split()
subcls = int(words[1])
gt_subclasses_flipped[ix] = subcls
boxes = np.zeros((num_objs, 4), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
gt_subclasses = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
for ix, line in enumerate(lines):
words = line.split()
cls = self._class_to_ind[words[0]]
subcls = int(words[1])
boxes[ix, :] = [float(n) for n in words[3:7]]
gt_classes[ix] = cls
gt_subclasses[ix] = subcls
overlaps[ix, cls] = 1.0
subindexes[ix, cls] = subcls
subindexes_flipped[ix, cls] = gt_subclasses_flipped[ix]
overlaps = scipy.sparse.csr_matrix(overlaps)
subindexes = scipy.sparse.csr_matrix(subindexes)
subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
# compute overlaps between grid boxes and gt boxes at multiple scales
# rescale the gt boxes
boxes_all = np.zeros((0, 4), dtype=np.float32)
for scale in cfg.TRAIN.SCALES:
boxes_all = np.vstack((boxes_all, boxes * scale))
gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
# compute grid boxes
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
boxes_grid, _, _ = get_boxes_grid(image_height, image_width)
# compute overlap
overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
# check how many gt boxes are covered by grids
if num_objs != 0:
index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
index_covered = np.unique(index[fg_inds])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])
else:
assert len(cfg.TRAIN.SCALES_BASE) == 1
scale = cfg.TRAIN.SCALES_BASE[0]
feat_stride = 16
# faster rcnn region proposal
base_size = 16
ratios = [3.0, 2.0, 1.5, 1.0, 0.75, 0.5, 0.25]
scales = 2**np.arange(1, 6, 0.5)
anchors = generate_anchors(base_size, ratios, scales)
num_anchors = anchors.shape[0]
# image size
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
# height and width of the heatmap
height = np.round((image_height * scale - 1) / 4.0 + 1)
height = np.floor((height - 1) / 2 + 1 + 0.5)
height = np.floor((height - 1) / 2 + 1 + 0.5)
width = np.round((image_width * scale - 1) / 4.0 + 1)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
# gt boxes
gt_boxes = boxes * scale
# 1. Generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
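# Worked size check (illustrative): the ratios and scales above typically
# yield A = 7 * 10 = 70 anchor templates (scales = 2**np.arange(1, 6, 0.5)
# has 10 entries), so for a heatmap with K = height * width positions,
# all_anchors ends up with shape (K * 70, 4).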
# compute overlap
overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
# check how many gt boxes are covered by anchors
if num_objs != 0:
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_subclasses': gt_subclasses,
'gt_subclasses_flipped': gt_subclasses_flipped,
'gt_overlaps': overlaps,
'gt_subindexes': subindexes,
'gt_subindexes_flipped': subindexes_flipped,
'flipped' : False}
def region_proposal_roidb(self):
"""
Return the database of regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_' + cfg.SUBCLS_NAME + '_' + cfg.REGION_PROPOSAL + '_region_proposal_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} roidb loaded from {}'.format(self.name, cache_file)
return roidb
if self._image_set != 'testing':
gt_roidb = self.gt_roidb()
print 'Loading region proposal network boxes...'
if self._image_set == 'trainval':
model = cfg.REGION_PROPOSAL + '_trainval/'
else:
model = cfg.REGION_PROPOSAL + '_train/'
rpn_roidb = self._load_rpn_roidb(gt_roidb, model)
print 'Region proposal network boxes loaded'
roidb = imdb.merge_roidbs(rpn_roidb, gt_roidb)
else:
print 'Loading region proposal network boxes...'
model = cfg.REGION_PROPOSAL + '_trainval/'
roidb = self._load_rpn_roidb(None, model)
print 'Region proposal network boxes loaded'
print '{} region proposals per image'.format(self._num_boxes_proposal / len(self.image_index))
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote roidb to {}'.format(cache_file)
return roidb
def _load_rpn_roidb(self, gt_roidb, model):
# set the prefix
prefix = model
box_list = []
for index in self.image_index:
filename = os.path.join(self._kitti_tracking_path, 'region_proposals', prefix, self._image_set, index + '.txt')
assert os.path.exists(filename), \
'RPN data not found at: {}'.format(filename)
print filename
raw_data = np.loadtxt(filename, dtype=float)
if len(raw_data.shape) == 1:
if raw_data.size == 0:
raw_data = raw_data.reshape((0, 5))
else:
raw_data = raw_data.reshape((1, 5))
x1 = raw_data[:, 0]
y1 = raw_data[:, 1]
x2 = raw_data[:, 2]
y2 = raw_data[:, 3]
score = raw_data[:, 4]
inds = np.where((x2 > x1) & (y2 > y1))[0]
raw_data = raw_data[inds,:4]
self._num_boxes_proposal += raw_data.shape[0]
box_list.append(raw_data)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def evaluate_detections(self, all_boxes, output_dir):
# load the mapping from subclass to alpha (viewpoint)
if self._image_set == 'training' and self._seq_name != 'trainval':
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'train', 'mapping.txt')
else:
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'trainval', 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.float)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = float(words[3])
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index[5:] + '.txt')
print 'Writing kitti_tracking results to file ' + filename
with open(filename, 'wt') as f:
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
subcls = int(dets[k, 5])
cls_name = self.classes[self.subclass_mapping[subcls]]
assert (cls_name == cls), 'subclass not in class'
alpha = mapping[subcls]
f.write('{:s} -1 -1 {:f} {:f} {:f} {:f} {:f} -1 -1 -1 -1 -1 -1 -1 {:.32f}\n'.format(\
cls, alpha, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
# write detection results into one file
def evaluate_detections_one_file(self, all_boxes, output_dir):
# load the mapping from subclass to alpha (viewpoint)
if self._image_set == 'training' and self._seq_name != 'trainval':
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'train', 'mapping.txt')
else:
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'trainval', 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.float)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = float(words[3])
# open results file
filename = os.path.join(output_dir, self._seq_name+'.txt')
print 'Writing all kitti_tracking results to file ' + filename
with open(filename, 'wt') as f:
# for each image
for im_ind, index in enumerate(self.image_index):
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
subcls = int(dets[k, 5])
cls_name = self.classes[self.subclass_mapping[subcls]]
assert (cls_name == cls), 'subclass not in class'
alpha = mapping[subcls]
f.write('{:d} -1 {:s} -1 -1 {:f} {:f} {:f} {:f} {:f} -1 -1 -1 -1000 -1000 -1000 -10 {:f}\n'.format(\
im_ind, cls, alpha, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
def evaluate_proposals(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index[5:] + '.txt')
print 'Writing kitti_tracking results to file ' + filename
with open(filename, 'wt') as f:
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(\
dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
def evaluate_proposals_msr(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index + '.txt')
print 'Writing kitti_tracking results to file ' + filename
with open(filename, 'wt') as f:
dets = all_boxes[im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
if __name__ == '__main__':
d = kitti_tracking('training', '0000')
res = d.roidb
from IPython import embed; embed()
|
christophmeissner/volunteer_planner
|
refs/heads/develop
|
shiftmailer/excelexport.py
|
5
|
# coding: utf-8
import logging
import os
import tempfile
from django.conf import settings
from django.core.mail.message import EmailMessage
from excel_renderer import ExcelRenderer
log = logging.getLogger(__name__)
class GenerateExcelSheet:
def __init__(self, shifts, mailer):
if not shifts:
raise AssertionError(u'No shifts given. Cannot generate Excel file for {}.'.format(mailer.facility))
self.shifts = shifts
self.mailer = mailer
self.tmpdir = tempfile.mkdtemp()
self.tmpfiles = []
def __del__(self):
for filename in self.tmpfiles:
if filename and os.path.exists(filename):
os.remove(filename)
if self.tmpdir is not None and os.path.exists(self.tmpdir):
log.debug(u'Removing tmpdir %s', self.tmpdir)
os.removedirs(self.tmpdir)
def generate_excel(self):
if self.shifts is None or self.shifts.count() == 0:
log.warn(u'No shifts, no shift schedule.')
return None
log.debug(u'About to generate XLS for facility "%s"', self.shifts[0].facility)
log.debug(u'Shifts query: %s', self.shifts.query)
filename = os.path.join(self.tmpdir, u'Dienstplan_{}_{}.xls'.format(self.mailer.organization,
self.shifts[0].starting_time
.strftime('%Y%m%d')))
self.tmpfiles.append(filename)
renderer = ExcelRenderer()
renderer.generate_shift_overview(self.mailer.organization, self.mailer.facility, self.shifts, filename)
return filename
def send_file(self):
attachment = self.generate_excel()
if not self.mailer:
log.error(u'Cannot create and send email without mailer information')
return
mail = EmailMessage()
mail.body = "Hallo " + self.mailer.first_name + " " + self.mailer.last_name + "\n\n"\
"Anbei die Liste zum Dienstplan der Freiwilligen.\nDies ist ein Service von volunteer-planner.org"
mail.subject = "Dienstplan fuer den " + self.shifts[0].starting_time.strftime("%d.%m.%Y") + \
" der Freiwilligen in der Unterkunft " + self.shifts[0].facility.name
mail.from_email = settings.DEFAULT_FROM_EMAIL
mail.to = [str(self.mailer.email)]
if attachment is not None:
mail.attach_file(path=attachment, mimetype='application/vnd.ms-excel')
mail.send()
else:
log.warn(u'No attachment, no mail sent.')
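# Typical call sequence (sketch; 'shifts' and 'mailer' stand for a shift
# queryset and its mailer record):
#
#   GenerateExcelSheet(shifts=shifts, mailer=mailer).send_file()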
|
KiChjang/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/importlib_metadata/tests/test_zip.py
|
12
|
import sys
import unittest
from importlib_metadata import (
distribution, entry_points, files, PackageNotFoundError,
version, distributions,
)
try:
from importlib import resources
getattr(resources, 'files')
getattr(resources, 'as_file')
except (ImportError, AttributeError):
import importlib_resources as resources
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
class TestZip(unittest.TestCase):
root = 'tests.data'
def _fixture_on_path(self, filename):
pkg_file = resources.files(self.root).joinpath(filename)
file = self.resources.enter_context(resources.as_file(pkg_file))
assert file.name.startswith('example-'), file.name
sys.path.insert(0, str(file))
self.resources.callback(sys.path.pop, 0)
def setUp(self):
# Find the path to the example-*.whl so we can add it to the front of
# sys.path, where we'll then try to find the metadata thereof.
self.resources = ExitStack()
self.addCleanup(self.resources.close)
self._fixture_on_path('example-21.12-py3-none-any.whl')
def test_zip_version(self):
self.assertEqual(version('example'), '21.12')
def test_zip_version_does_not_match(self):
with self.assertRaises(PackageNotFoundError):
version('definitely-not-installed')
def test_zip_entry_points(self):
scripts = dict(entry_points()['console_scripts'])
entry_point = scripts['example']
self.assertEqual(entry_point.value, 'example:main')
entry_point = scripts['Example']
self.assertEqual(entry_point.value, 'example:main')
def test_missing_metadata(self):
self.assertIsNone(distribution('example').read_text('does not exist'))
def test_case_insensitive(self):
self.assertEqual(version('Example'), '21.12')
def test_files(self):
for file in files('example'):
path = str(file.dist.locate_file(file))
assert '.whl/' in path, path
def test_one_distribution(self):
dists = list(distributions(path=sys.path[:1]))
assert len(dists) == 1
class TestEgg(TestZip):
def setUp(self):
# Find the path to the example-*.egg so we can add it to the front of
# sys.path, where we'll then try to find the metadata thereof.
self.resources = ExitStack()
self.addCleanup(self.resources.close)
self._fixture_on_path('example-21.12-py3.6.egg')
def test_files(self):
for file in files('example'):
path = str(file.dist.locate_file(file))
assert '.egg/' in path, path
|
abhishekgahlot/tornado-boilerplate
|
refs/heads/master
|
settings.py
|
6
|
import logging
import logging.handlers
import tornado
import tornado.template
import os
from tornado.options import define, options
import environment
import logconfig
# Make filepaths relative to settings.
path = lambda root,*a: os.path.join(root, *a)
ROOT = os.path.dirname(os.path.abspath(__file__))
define("port", default=8888, help="run on the given port", type=int)
define("config", default=None, help="tornado config file")
define("debug", default=False, help="debug mode")
tornado.options.parse_command_line()
MEDIA_ROOT = path(ROOT, 'media')
TEMPLATE_ROOT = path(ROOT, 'templates')
# Deployment Configuration
class DeploymentType:
PRODUCTION = "PRODUCTION"
DEV = "DEV"
SOLO = "SOLO"
STAGING = "STAGING"
dict = {
SOLO: 1,
PRODUCTION: 2,
DEV: 3,
STAGING: 4
}
if 'DEPLOYMENT_TYPE' in os.environ:
DEPLOYMENT = os.environ['DEPLOYMENT_TYPE'].upper()
else:
DEPLOYMENT = DeploymentType.SOLO
settings = {}
settings['debug'] = DEPLOYMENT != DeploymentType.PRODUCTION or options.debug
settings['static_path'] = MEDIA_ROOT
settings['cookie_secret'] = "your-cookie-secret"
settings['xsrf_cookies'] = True
settings['template_loader'] = tornado.template.Loader(TEMPLATE_ROOT)
SYSLOG_TAG = "boilerplate"
SYSLOG_FACILITY = logging.handlers.SysLogHandler.LOG_LOCAL2
# See PEP 391 and logconfig for formatting help. Each section of LOGGERS
# will get merged into the corresponding section of log_settings.py.
# Handlers and log levels are set up automatically based on LOG_LEVEL and DEBUG
# unless you set them here. Messages will not propagate through a logger
# unless propagate: True is set.
LOGGERS = {
'loggers': {
'boilerplate': {},
},
}
if settings['debug']:
LOG_LEVEL = logging.DEBUG
else:
LOG_LEVEL = logging.INFO
USE_SYSLOG = DEPLOYMENT != DeploymentType.SOLO
logconfig.initialize_logging(SYSLOG_TAG, SYSLOG_FACILITY, LOGGERS,
LOG_LEVEL, USE_SYSLOG)
if options.config:
tornado.options.parse_config_file(options.config)
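
The LOGGERS comment above is usually the part a project tweaks. A hypothetical extension is sketched below, assuming logconfig merges these entries into the dictConfig sections of log_settings.py as the comment describes; the 'boilerplate.payments' logger name is made up for illustration.

# Sketch only: an extra, more verbose sub-logger under the project's namespace.
LOGGERS = {
    'loggers': {
        'boilerplate': {},
        # Messages from this child logger are emitted at DEBUG and, per the
        # comment above, only reach the parent handlers because 'propagate'
        # is set explicitly.
        'boilerplate.payments': {
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}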
|
akretion/odoo
|
refs/heads/12-patch-paging-100-in-o2m
|
addons/website_crm/models/res_config_settings.py
|
12
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import fields, models


class ResConfigSettings(models.TransientModel):
    _inherit = 'res.config.settings'

    def _get_crm_default_team_domain(self):
        if self.env.user.has_group('crm.group_use_lead'):
            return [('use_leads', '=', True)]
        else:
            return [('use_opportunities', '=', True)]

    crm_default_team_id = fields.Many2one(
        'crm.team', string='Default Sales Team', related='website_id.crm_default_team_id', readonly=False,
        domain=lambda self: self._get_crm_default_team_domain(),
        help='Default Sales Team for new leads created through the Contact Us form.')
    crm_default_user_id = fields.Many2one(
        'res.users', string='Default Salesperson', related='website_id.crm_default_user_id', domain=[('share', '=', False)], readonly=False,
        help='Default salesperson for new leads created through the Contact Us form.')
|
EmadMokhtar/Django
|
refs/heads/master
|
django/core/mail/backends/locmem.py
|
128
|
"""
Backend for test environment.
"""
from django.core import mail
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
"""
An email backend for use during test sessions.
The test connection stores email messages in a dummy outbox,
rather than sending them out on the wire.
The dummy outbox is accessible through the outbox instance attribute.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(mail, 'outbox'):
mail.outbox = []
def send_messages(self, messages):
"""Redirect messages to the dummy outbox"""
msg_count = 0
for message in messages: # .message() triggers header validation
message.message()
mail.outbox.append(message)
msg_count += 1
return msg_count
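
As the docstring above says, messages accumulate in mail.outbox instead of going over the wire. A minimal sketch of how a test typically inspects that outbox follows; the test class name and addresses are illustrative, and the explicit EMAIL_BACKEND override is only there to make the assumption visible, since Django's test runner normally switches to this backend on its own.

# Sketch only: hypothetical test showing the dummy-outbox pattern.
from django.core import mail
from django.test import TestCase, override_settings


@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
class WelcomeMailTest(TestCase):
    def test_mail_lands_in_outbox(self):
        mail.send_mail('Welcome', 'Hello there', 'from@example.com', ['to@example.com'])
        # Nothing is sent over the network; the message is appended to mail.outbox.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Welcome')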
|
lulf/qpid-dispatch
|
refs/heads/master
|
tests/system_tests_drain_support.py
|
2
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

from proton.handlers import MessagingHandler
from proton.reactor import Container
from proton import Message, Endpoint
from system_test import TIMEOUT


class Timeout(object):
    def __init__(self, parent):
        self.parent = parent

    def on_timer_task(self, event):
        self.parent.timeout()


class DrainMessagesHandler(MessagingHandler):
    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.received_count = 0
        self.address = address
        self.error = "Unexpected Exit"

    def timeout(self):
        self.error = "Timeout Expired: sent: %d rcvd: %d" % (self.sent_count, self.received_count)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)

        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)

    def on_link_flow(self, event):
        if event.link.is_sender and event.link.credit \
                and event.link.state & Endpoint.LOCAL_ACTIVE \
                and event.link.state & Endpoint.REMOTE_ACTIVE:
            self.on_sendable(event)

        # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
        # messages. That along with 10 messages received indicates that the drain worked and we can
        # declare that the test is successful
        if self.received_count == 10 and event.link.credit == 0:
            self.error = None
            self.timer.cancel()
            self.receiver.close()
            self.sender.close()
            self.conn.close()

    def on_sendable(self, event):
        if self.sent_count < 10:
            msg = Message(body="Hello World", properties={'seq': self.sent_count})
            dlv = event.sender.send(msg)
            dlv.settle()
            self.sent_count += 1

    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1

            if self.received_count < 4:
                event.receiver.flow(1)
            elif self.received_count == 4:
                # We are issuing a drain of 20. This means that we will receive all the 10 messages
                # that the sender is sending. The router will also send back a response flow frame with
                # drain=True but I don't have any way of making sure that the response frame reached the
                # receiver
                event.receiver.drain(20)

    def run(self):
        Container(self).run()


class DrainOneMessageHandler(DrainMessagesHandler):
    def __init__(self, address):
        super(DrainOneMessageHandler, self).__init__(address)

    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1

            if self.received_count < 4:
                event.receiver.flow(1)
            elif self.received_count == 4:
                # We are issuing a drain of 1 after we receive the 4th message.
                # This means that going forward, we will receive only one more message.
                event.receiver.drain(1)

            # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
            # messages. That along with 5 messages received (4 earlier messages and 1 extra message for drain=1)
            # indicates that the drain worked and we can declare that the test is successful
            if self.received_count == 5 and event.link.credit == 0:
                self.error = None
                self.timer.cancel()
                self.receiver.close()
                self.sender.close()
                self.conn.close()


class DrainNoMessagesHandler(MessagingHandler):
    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainNoMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.address = address
        self.error = "Unexpected Exit"

    def timeout(self):
        self.error = "Timeout Expired"
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)

        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)

    def on_sendable(self, event):
        self.receiver.drain(1)

    def on_link_flow(self, event):
        if self.receiver.credit == 0:
            self.error = None
            self.timer.cancel()
            self.conn.close()

    def run(self):
        Container(self).run()


class DrainNoMoreMessagesHandler(MessagingHandler):
    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainNoMoreMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.address = address
        self.sent = 0
        self.rcvd = 0
        self.error = "Unexpected Exit"

    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d" % (self.sent, self.rcvd)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)

        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)

    def on_sendable(self, event):
        if self.sent == 0:
            msg = Message(body="Hello World")
            event.sender.send(msg)
            self.sent += 1

    def on_message(self, event):
        self.rcvd += 1

    def on_settled(self, event):
        self.receiver.drain(1)

    def on_link_flow(self, event):
        if self.receiver.credit == 0:
            self.error = None
            self.timer.cancel()
            self.conn.close()

    def run(self):
        Container(self).run()
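
The inline comments above encode the credit arithmetic these tests check: drain(n) grants n credits, each delivered message consumes one, and any credit left over once the sender has nothing more to send is used up, so the receiver observes credit == 0 when the drain cycle completes. A purely conceptual toy of that bookkeeping, not the proton API, is sketched below.

# Toy model only: illustrates the drain/credit accounting, with made-up names.
class ToyLink(object):
    def __init__(self, pending_messages):
        self.pending = pending_messages
        self.credit = 0
        self.delivered = 0

    def drain(self, n):
        self.credit += n
        # Sender side: deliver what it has, one credit per message...
        while self.pending and self.credit:
            self.pending -= 1
            self.credit -= 1
            self.delivered += 1
        # ...then the drain flag makes it burn whatever credit is left.
        self.credit = 0


link = ToyLink(pending_messages=6)
link.drain(20)
print(link.delivered, link.credit)  # 6 0, mirroring the received_count/credit checks above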
|
kmoocdev2/edx-platform
|
refs/heads/real_2019
|
openedx/core/djangoapps/content/course_overviews/tests/test_tasks.py
|
18
|
import mock

from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory

from ..tasks import enqueue_async_course_overview_update_tasks


class BatchedAsyncCourseOverviewUpdateTests(ModuleStoreTestCase):
    def setUp(self):
        super(BatchedAsyncCourseOverviewUpdateTests, self).setUp()
        self.course_1 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
        self.course_2 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
        self.course_3 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)

    @mock.patch('openedx.core.djangoapps.content.course_overviews.models.CourseOverview.update_select_courses')
    def test_enqueue_all_courses_in_single_batch(self, mock_update_courses):
        enqueue_async_course_overview_update_tasks(
            course_ids=[],
            force_update=True,
            all_courses=True
        )

        called_args, called_kwargs = mock_update_courses.call_args_list[0]
        self.assertEqual(sorted([self.course_1.id, self.course_2.id, self.course_3.id]), sorted(called_args[0]))
        self.assertEqual({'force_update': True}, called_kwargs)
        self.assertEqual(1, mock_update_courses.call_count)

    @mock.patch('openedx.core.djangoapps.content.course_overviews.models.CourseOverview.update_select_courses')
    def test_enqueue_specific_courses_in_two_batches(self, mock_update_courses):
        enqueue_async_course_overview_update_tasks(
            course_ids=[unicode(self.course_1.id), unicode(self.course_2.id)],
            force_update=True,
            chunk_size=1,
            all_courses=False
        )

        mock_update_courses.assert_has_calls([
            mock.call([self.course_1.id], force_update=True),
            mock.call([self.course_2.id], force_update=True)
        ])
|
medspx/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/grass7/ext/r_topmodel.py
|
5
|
# -*- coding: utf-8 -*-

"""
***************************************************************************
    r_topmodel.py
    -------------
    Date                 : February 2016
    Copyright            : (C) 2016 by Médéric Ribreux
    Email                : medspx at medspx dot fr
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'

# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'


def processCommand(alg, parameters):
    # We temporarily remove the output
    command = "r.topmodel parameters=\"{}\" topidxstats=\"{}\" input=\"{}\" output=\"{}\" {} {}--overwrite".format(
        alg.getParameterValue('parameters'),
        alg.getParameterValue('topidxstats'),
        alg.getParameterValue('input'),
        alg.getOutputValue('output'),
        'timestep={}'.format(alg.getParameterValue('timestep')) if alg.getParameterValue('timestep') else '',
        'topidxclass={}'.format(alg.getParameterValue('topidxclass')) if alg.getParameterValue('topidxclass') else ''
    )
    alg.commands.append(command)
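
For a sense of what processCommand assembles, here is a rough illustration using a stand-in for alg with hypothetical parameter values; the real object comes from the QGIS Processing framework, and only getParameterValue, getOutputValue and commands are faked here.

# Sketch only: _FakeAlg and its values are invented for illustration.
class _FakeAlg(object):
    _params = {'parameters': 'params.txt', 'topidxstats': 'topidx.txt',
               'input': 'rain.txt', 'timestep': 6, 'topidxclass': None}

    def __init__(self):
        self.commands = []

    def getParameterValue(self, name):
        return self._params.get(name)

    def getOutputValue(self, name):
        return 'output.txt'


alg = _FakeAlg()
processCommand(alg, {})
print(alg.commands[0])
# r.topmodel parameters="params.txt" topidxstats="topidx.txt" input="rain.txt" output="output.txt" timestep=6 --overwrite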
|
DeadSix27/python_cross_compile_script
|
refs/heads/master
|
packages/dependencies/libopenh264.py
|
1
|
{
    'repo_type' : 'git',
    'url' : 'https://github.com/cisco/openh264.git',
    'patches' : [
        ('openh264/0001-remove-fma3-call.patch','-p1'),
    ],
    'needs_configure' : False,
    'build_options' : '{make_prefix_options} OS=mingw_nt ARCH={bit_name} ASM=yasm',
    'install_options' : '{make_prefix_options} OS=mingw_nt',
    'install_target' : 'install-static',
    '_info' : { 'version' : None, 'fancy_name' : 'openh264' },
}
|
nickhand/nbodykit
|
refs/heads/master
|
nbodykit/source/mesh/tests/test_linear.py
|
2
|
from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose

setup_logging()


@MPITest([1, 4])
def test_paint(comm):
    cosmo = cosmology.Planck15

    # linear grid
    Plin = cosmology.LinearPower(cosmo, redshift=0.55, transfer='EisensteinHu')
    source = LinearMesh(Plin, Nmesh=64, BoxSize=512, seed=42, comm=comm)

    # compute P(k) from linear grid
    r = FFTPower(source, mode='1d', Nmesh=64, dk=0.01, kmin=0.005)

    # run and get the result
    valid = r.power['modes'] > 0

    # variance of each point is 2*P^2/N_modes
    theory = Plin(r.power['k'][valid])
    errs = (2*theory**2/r.power['modes'][valid])**0.5

    # compute reduced chi-squared of measurement to theory
    chisq = ((r.power['power'][valid].real - theory)/errs)**2
    N = valid.sum()
    red_chisq = chisq.sum() / (N-1)

    # make sure it is less than 1.5 (should be ~1)
    assert red_chisq < 1.5, "reduced chi sq of linear grid measurement = %.3f" % red_chisq
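
The error model in the comments above, a variance of 2*P(k)^2 / N_modes per band power, is what makes the reduced chi-squared threshold meaningful. A toy check of that arithmetic with synthetic numbers, independent of FFTPower and MPI, is sketched below; the theory and mode counts are made up.

# Sketch only: Gaussian "measurements" drawn with the stated variance should
# give a reduced chi-squared near 1.
import numpy

rng = numpy.random.RandomState(42)
theory = numpy.linspace(1e4, 1e2, 50)            # stand-in for Plin(k) values
modes = numpy.linspace(10, 500, 50).astype(int)  # stand-in for r.power['modes']
errs = (2 * theory**2 / modes)**0.5
measured = theory + rng.normal(scale=errs)

chisq = ((measured - theory) / errs)**2
red_chisq = chisq.sum() / (len(theory) - 1)
print(red_chisq)  # typically within a few tenths of 1.0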
|