tseaver/gcloud-python | monitoring/google/cloud/monitoring_v3/__init__.py | apache-2.0

# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.cloud.monitoring_v3 import types
from google.cloud.monitoring_v3.gapic import alert_policy_service_client
from google.cloud.monitoring_v3.gapic import enums
from google.cloud.monitoring_v3.gapic import group_service_client
from google.cloud.monitoring_v3.gapic import metric_service_client
from google.cloud.monitoring_v3.gapic import (
notification_channel_service_client as notification_client)
from google.cloud.monitoring_v3.gapic import uptime_check_service_client
class AlertPolicyServiceClient(
alert_policy_service_client.AlertPolicyServiceClient):
__doc__ = alert_policy_service_client.AlertPolicyServiceClient.__doc__
enums = enums
class GroupServiceClient(group_service_client.GroupServiceClient):
__doc__ = group_service_client.GroupServiceClient.__doc__
enums = enums
class MetricServiceClient(metric_service_client.MetricServiceClient):
__doc__ = metric_service_client.MetricServiceClient.__doc__
enums = enums
class NotificationChannelServiceClient(
notification_client.NotificationChannelServiceClient):
__doc__ = notification_client.NotificationChannelServiceClient.__doc__
enums = enums
class UptimeCheckServiceClient(
uptime_check_service_client.UptimeCheckServiceClient):
__doc__ = uptime_check_service_client.UptimeCheckServiceClient.__doc__
enums = enums
__all__ = (
'enums',
'types',
'AlertPolicyServiceClient',
'GroupServiceClient',
'MetricServiceClient',
'NotificationChannelServiceClient',
'UptimeCheckServiceClient',
)
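# --- Illustrative usage sketch (not part of the original module) ---
# The wrapper classes above simply re-export the GAPIC clients with the
# enums attached. A client is typically constructed and used as below;
# the project id is a made-up placeholder.
#
#   from google.cloud import monitoring_v3
#
#   client = monitoring_v3.MetricServiceClient()
#   name = 'projects/my-project'
#   for descriptor in client.list_metric_descriptors(name):
#       print(descriptor.type)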
abusse/cinder | cinder/volume/drivers/ibm/storwize_svc/__init__.py | apache-2.0

# Copyright 2013 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for IBM Storwize family and SVC storage systems.
Notes:
1. If you specify both a password and a key file, this driver will use the
key file only.
2. When using a key file for authentication, it is up to the user or
system administrator to store the private key in a safe manner.
3. The defaults for creating volumes are "-rsize 2% -autoexpand
-grainsize 256 -warning 0". These can be changed in the configuration
file or by using volume types (recommended only for advanced users).
Limitations:
1. The driver expects CLI output in English; error messages may be in a
localized format.
2. Clones and creating volumes from snapshots, where the source and target
are of different sizes, is not supported.
"""
import math
import time
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
from cinder.volume.drivers.ibm.storwize_svc import replication as storwize_rep
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
storwize_svc_opts = [
cfg.StrOpt('storwize_svc_volpool_name',
default='volpool',
help='Storage system storage pool for volumes'),
cfg.IntOpt('storwize_svc_vol_rsize',
default=2,
help='Storage system space-efficiency parameter for volumes '
'(percentage)'),
cfg.IntOpt('storwize_svc_vol_warning',
default=0,
help='Storage system threshold for volume capacity warnings '
'(percentage)'),
cfg.BoolOpt('storwize_svc_vol_autoexpand',
default=True,
help='Storage system autoexpand parameter for volumes '
'(True/False)'),
cfg.IntOpt('storwize_svc_vol_grainsize',
default=256,
help='Storage system grain size parameter for volumes '
'(32/64/128/256)'),
cfg.BoolOpt('storwize_svc_vol_compression',
default=False,
help='Storage system compression option for volumes'),
cfg.BoolOpt('storwize_svc_vol_easytier',
default=True,
help='Enable Easy Tier for volumes'),
cfg.IntOpt('storwize_svc_vol_iogrp',
default=0,
help='The I/O group in which to allocate volumes'),
cfg.IntOpt('storwize_svc_flashcopy_timeout',
default=120,
help='Maximum number of seconds to wait for FlashCopy to be '
'prepared. Maximum value is 600 seconds (10 minutes)'),
cfg.StrOpt('storwize_svc_connection_protocol',
default='iSCSI',
help='Connection protocol (iSCSI/FC)'),
cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
default=True,
help='Configure CHAP authentication for iSCSI connections '
'(Default: Enabled)'),
cfg.BoolOpt('storwize_svc_multipath_enabled',
default=False,
help='Connect with multipath (FC only; iSCSI multipath is '
'controlled by Nova)'),
cfg.BoolOpt('storwize_svc_multihostmap_enabled',
default=True,
help='Allows vdisk to multi host mapping'),
cfg.BoolOpt('storwize_svc_npiv_compatibility_mode',
default=False,
help='Indicate whether svc driver is compatible for NPIV '
'setup. If it is compatible, it will allow no wwpns '
'being returned on get_conn_fc_wwpns during '
'initialize_connection'),
cfg.BoolOpt('storwize_svc_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('storwize_svc_stretched_cluster_partner',
default=None,
help='If operating in stretched cluster mode, specify the '
'name of the pool in which mirrored copies are stored. '
'Example: "pool2"'),
]
CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts)
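# --- Illustrative configuration sketch (not from the original source) ---
# The options registered above are normally set per backend in cinder.conf;
# the section name, addresses and credentials below are made-up examples.
#
#   [storwize-backend]
#   volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
#   san_ip = 192.0.2.10
#   san_login = superuser
#   san_password = passw0rd
#   storwize_svc_volpool_name = volpool
#   storwize_svc_connection_protocol = iSCSI
#   storwize_svc_iscsi_chap_enabled = True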
class StorwizeSVCDriver(san.SanDriver):
"""IBM Storwize V7000 and SVC iSCSI/FC volume driver.
Version history:
1.0 - Initial driver
1.1 - FC support, create_cloned_volume, volume type support,
get_volume_stats, minor bug fixes
1.2.0 - Added retype
1.2.1 - Code refactor, improved exception handling
1.2.2 - Fix bug #1274123 (races in host-related functions)
1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
lsfabric, clear unused data from connections, ensure matching
WWPNs by comparing lower case)
1.2.4 - Fix bug #1278035 (async migration/retype)
1.2.5 - Added support for manage_existing (unmanage is inherited)
1.2.6 - Added QoS support in terms of I/O throttling rate
1.3.1 - Added support for volume replication
1.3.2 - Added support for consistency group
"""
VERSION = "1.3.2"
VDISKCOPYOPS_INTERVAL = 600
def __init__(self, *args, **kwargs):
super(StorwizeSVCDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(storwize_svc_opts)
self._helpers = storwize_helpers.StorwizeHelpers(self._run_ssh)
self._vdiskcopyops = {}
self._vdiskcopyops_loop = None
self.replication = None
self._state = {'storage_nodes': {},
'enabled_protocols': set(),
'compression_enabled': False,
'available_iogrps': [],
'system_name': None,
'system_id': None,
'code_level': None,
}
# Storwize has the limitation that it cannot handle a burst of more than
# 3 new SSH connections within 1 second, so slow down the initialization.
time.sleep(1)
def do_setup(self, ctxt):
"""Check that we have all configuration details from the storage."""
LOG.debug('enter: do_setup')
# Get storage system name, id, and code level
self._state.update(self._helpers.get_system_info())
# Get the replication helpers
self.replication = storwize_rep.StorwizeSVCReplication.factory(self)
# Validate that the pool exists
pool = self.configuration.storwize_svc_volpool_name
try:
self._helpers.get_pool_attrs(pool)
except exception.VolumeBackendAPIException:
msg = _('Failed getting details for pool %s') % pool
raise exception.InvalidInput(reason=msg)
# Check if compression is supported
self._state['compression_enabled'] = \
self._helpers.compression_enabled()
# Get the available I/O groups
self._state['available_iogrps'] = \
self._helpers.get_available_io_groups()
# Get the iSCSI and FC names of the Storwize/SVC nodes
self._state['storage_nodes'] = self._helpers.get_node_info()
# Add the iSCSI IP addresses and WWPNs to the storage node info
self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
self._helpers.add_fc_wwpns(self._state['storage_nodes'])
# For each node, check what connection modes it supports. Delete any
# nodes that do not support any types (may be partially configured).
to_delete = []
for k, node in self._state['storage_nodes'].iteritems():
if ((len(node['ipv4']) or len(node['ipv6']))
and len(node['iscsi_name'])):
node['enabled_protocols'].append('iSCSI')
self._state['enabled_protocols'].add('iSCSI')
if len(node['WWPN']):
node['enabled_protocols'].append('FC')
self._state['enabled_protocols'].add('FC')
if not len(node['enabled_protocols']):
to_delete.append(k)
for delkey in to_delete:
del self._state['storage_nodes'][delkey]
# Make sure we have at least one node configured
if not len(self._state['storage_nodes']):
msg = _('do_setup: No configured nodes.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
# Build the list of in-progress vdisk copy operations
if ctxt is None:
admin_context = context.get_admin_context()
else:
admin_context = ctxt.elevated()
volumes = self.db.volume_get_all_by_host(admin_context, self.host)
for volume in volumes:
metadata = self.db.volume_admin_metadata_get(admin_context,
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
self._vdiskcopyops[volume['id']] = ops
# if vdiskcopy exists in database, start the looping call
if len(self._vdiskcopyops) >= 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
LOG.debug('leave: do_setup')
def check_for_setup_error(self):
"""Ensure that the flags are set properly."""
LOG.debug('enter: check_for_setup_error')
# Check that we have the system ID information
if self._state['system_name'] is None:
exception_msg = (_('Unable to determine system name'))
raise exception.VolumeBackendAPIException(data=exception_msg)
if self._state['system_id'] is None:
exception_msg = (_('Unable to determine system id'))
raise exception.VolumeBackendAPIException(data=exception_msg)
required_flags = ['san_ip', 'san_ssh_port', 'san_login',
'storwize_svc_volpool_name']
for flag in required_flags:
if not self.configuration.safe_get(flag):
raise exception.InvalidInput(reason=_('%s is not set') % flag)
# Ensure that either password or keyfile were set
if not (self.configuration.san_password or
self.configuration.san_private_key):
raise exception.InvalidInput(
reason=_('Password or SSH private key is required for '
'authentication: set either san_password or '
'san_private_key option'))
# Check that flashcopy_timeout is not more than 10 minutes
flashcopy_timeout = self.configuration.storwize_svc_flashcopy_timeout
if not (flashcopy_timeout > 0 and flashcopy_timeout <= 600):
raise exception.InvalidInput(
reason=_('Illegal value %d specified for '
'storwize_svc_flashcopy_timeout: '
'valid values are between 0 and 600')
% flashcopy_timeout)
opts = self._helpers.build_default_opts(self.configuration)
self._helpers.check_vdisk_opts(self._state, opts)
LOG.debug('leave: check_for_setup_error')
def ensure_export(self, ctxt, volume):
"""Check that the volume exists on the storage.
The system does not "export" volumes as a Linux iSCSI target does,
and therefore we just check that the volume exists on the storage.
"""
volume_defined = self._helpers.is_vdisk_defined(volume['name'])
if not volume_defined:
LOG.error(_LE('ensure_export: Volume %s not found on storage')
% volume['name'])
def create_export(self, ctxt, volume):
model_update = None
return model_update
def remove_export(self, ctxt, volume):
pass
def validate_connector(self, connector):
"""Check connector for at least one enabled protocol (iSCSI/FC)."""
valid = False
if ('iSCSI' in self._state['enabled_protocols'] and
'initiator' in connector):
valid = True
if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
valid = True
if not valid:
msg = (_LE('The connector does not contain the required '
'information.'))
LOG.error(msg)
raise exception.InvalidConnectorException(
missing='initiator or wwpns')
def _get_vdisk_params(self, type_id, volume_type=None,
volume_metadata=None):
return self._helpers.get_vdisk_params(self.configuration, self._state,
type_id, volume_type=volume_type,
volume_metadata=volume_metadata)
@fczm_utils.AddFCZone
@utils.synchronized('storwize-host', external=True)
def initialize_connection(self, volume, connector):
"""Perform the necessary work so that an iSCSI/FC connection can
be made.
To be able to create an iSCSI/FC connection from a given host to a
volume, we must:
1. Translate the given iSCSI name or WWNN to a host name
2. Create new host on the storage system if it does not yet exist
3. Map the volume to the host if it is not already done
4. Return the connection information for relevant nodes (in the
proper I/O group)
"""
LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
' %(conn)s', {'vol': volume['id'], 'conn': connector})
vol_opts = self._get_vdisk_params(volume['volume_type_id'])
volume_name = volume['name']
# Delete irrelevant connection information that later could result
# in unwanted behaviour. For example, if FC is used yet the hosts
# return iSCSI data, the driver will try to create the iSCSI connection
# which can result in a nice error about reaching the per-host maximum
# iSCSI initiator limit.
# First make a copy so we don't mess with a caller's connector.
connector = connector.copy()
if vol_opts['protocol'] == 'FC':
connector.pop('initiator', None)
elif vol_opts['protocol'] == 'iSCSI':
connector.pop('wwnns', None)
connector.pop('wwpns', None)
# Check if a host object is defined for this host name
host_name = self._helpers.get_host_from_connector(connector)
if host_name is None:
# Host does not exist - add a new host to Storwize/SVC
host_name = self._helpers.create_host(connector)
if vol_opts['protocol'] == 'iSCSI':
chap_secret = self._helpers.get_chap_secret_for_host(host_name)
chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
if chap_enabled and chap_secret is None:
chap_secret = self._helpers.add_chap_secret_to_host(host_name)
elif not chap_enabled and chap_secret:
LOG.warning(_LW('CHAP secret exists for host but CHAP is '
'disabled'))
volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
if volume_attributes is None:
msg = (_('initialize_connection: Failed to get attributes'
' for volume %s') % volume_name)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
multihostmap = self.configuration.storwize_svc_multihostmap_enabled
lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
multihostmap)
try:
preferred_node = volume_attributes['preferred_node_id']
IO_group = volume_attributes['IO_group_id']
except KeyError as e:
LOG.error(_LE('Did not find expected column name in '
'lsvdisk: %s') % e)
msg = (_('initialize_connection: Missing volume '
'attribute for volume %s') % volume_name)
raise exception.VolumeBackendAPIException(data=msg)
try:
# Get preferred node and other nodes in I/O group
preferred_node_entry = None
io_group_nodes = []
for node in self._state['storage_nodes'].itervalues():
if vol_opts['protocol'] not in node['enabled_protocols']:
continue
if node['id'] == preferred_node:
preferred_node_entry = node
if node['IO_group'] == IO_group:
io_group_nodes.append(node)
if not len(io_group_nodes):
msg = (_('initialize_connection: No node found in '
'I/O group %(gid)s for volume %(vol)s') %
{'gid': IO_group, 'vol': volume_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not preferred_node_entry and not vol_opts['multipath']:
# Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0]
LOG.warn(_LW('initialize_connection: Did not find a preferred '
'node for volume %s') % volume_name)
properties = {}
properties['target_discovered'] = False
properties['target_lun'] = lun_id
properties['volume_id'] = volume['id']
if vol_opts['protocol'] == 'iSCSI':
type_str = 'iscsi'
if len(preferred_node_entry['ipv4']):
ipaddr = preferred_node_entry['ipv4'][0]
else:
ipaddr = preferred_node_entry['ipv6'][0]
properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
properties['target_iqn'] = preferred_node_entry['iscsi_name']
if chap_secret:
properties['auth_method'] = 'CHAP'
properties['auth_username'] = connector['initiator']
properties['auth_password'] = chap_secret
properties['discovery_auth_method'] = 'CHAP'
properties['discovery_auth_username'] = (
connector['initiator'])
properties['discovery_auth_password'] = chap_secret
else:
type_str = 'fibre_channel'
conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
# If conn_wwpns is empty, then that means that there were
# no target ports with visibility to any of the initiators.
# We will either fail the attach, or return all target
# ports, depending on the value of the
# storwize_svc_npiv_compatibility_mode flag.
if len(conn_wwpns) == 0:
npiv_compat = self.configuration.\
storwize_svc_npiv_compatibility_mode
if not npiv_compat:
msg = (_('Could not get FC connection information for '
'the host-volume connection. Is the host '
'configured properly for FC connections?'))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
for node in self._state['storage_nodes'].itervalues():
conn_wwpns.extend(node['WWPN'])
if not vol_opts['multipath']:
# preferred_node_entry can have a list of WWPNs while only
# one WWPN may be available on the storage host. Here we
# walk through the nodes until we find one that works,
# default to the first WWPN otherwise.
for WWPN in preferred_node_entry['WWPN']:
if WWPN in conn_wwpns:
properties['target_wwn'] = WWPN
break
else:
LOG.warning(_LW('Unable to find a preferred node match'
' for node %(node)s in the list of '
'available WWPNs on %(host)s. '
'Using first available.') %
{'node': preferred_node,
'host': host_name})
properties['target_wwn'] = conn_wwpns[0]
else:
properties['target_wwn'] = conn_wwpns
i_t_map = self._make_initiator_target_map(connector['wwpns'],
conn_wwpns)
properties['initiator_target_map'] = i_t_map
# specific for z/VM, refer to cinder bug 1323993
if "zvm_fcp" in connector:
properties['zvm_fcp'] = connector['zvm_fcp']
except Exception:
with excutils.save_and_reraise_exception():
self.terminate_connection(volume, connector)
LOG.error(_LE('initialize_connection: Failed '
'to collect return '
'properties for volume %(vol)s and connector '
'%(conn)s.\n'), {'vol': volume,
'conn': connector})
LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
'connector %(conn)s\n properties: %(prop)s',
{'vol': volume['id'], 'conn': connector,
'prop': properties})
return {'driver_volume_type': type_str, 'data': properties, }
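# --- Illustrative return value (all values are made up for the example) ---
# For an iSCSI attach the dictionary returned above has roughly this shape:
#
#   {'driver_volume_type': 'iscsi',
#    'data': {'target_discovered': False,
#             'target_lun': 0,
#             'volume_id': 'f0e1d2c3-...',
#             'target_portal': '192.0.2.20:3260',
#             'target_iqn': 'iqn.1986-03.com.ibm:2145.cluster.node1',
#             'auth_method': 'CHAP',
#             'auth_username': 'iqn.1993-08.org.debian:01:abcdef',
#             'auth_password': '<chap secret>'}}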
def _make_initiator_target_map(self, initiator_wwpns, target_wwpns):
"""Build a simplistic all-to-all mapping."""
i_t_map = {}
for i_wwpn in initiator_wwpns:
i_t_map[str(i_wwpn)] = []
for t_wwpn in target_wwpns:
i_t_map[i_wwpn].append(t_wwpn)
return i_t_map
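# Illustrative example (not from the original source): with one initiator
# WWPN and two target WWPNs the all-to-all map built above would be
#
#   {'10000000c9123456': ['500507680140aabb', '500507680140aacc']}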
@fczm_utils.RemoveFCZone
@utils.synchronized('storwize-host', external=True)
def terminate_connection(self, volume, connector, **kwargs):
"""Cleanup after an iSCSI connection has been terminated.
When we clean up a terminated connection between a given connector
and volume, we:
1. Translate the given connector to a host name
2. Remove the volume-to-host mapping if it exists
3. Delete the host if it has no more mappings (hosts are created
automatically by this driver when mappings are created)
"""
LOG.debug('enter: terminate_connection: volume %(vol)s with connector'
' %(conn)s', {'vol': volume['id'], 'conn': connector})
vol_name = volume['name']
if 'host' in connector:
# There may be two hosts defined on the storage, one for FC and the
# other for iSCSI, so get the host according to the protocol.
vol_opts = self._get_vdisk_params(volume['volume_type_id'])
connector = connector.copy()
if vol_opts['protocol'] == 'FC':
connector.pop('initiator', None)
elif vol_opts['protocol'] == 'iSCSI':
connector.pop('wwnns', None)
connector.pop('wwpns', None)
host_name = self._helpers.get_host_from_connector(connector)
if host_name is None:
msg = (_('terminate_connection: Failed to get host name from'
' connector.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
# See bug #1244257
host_name = None
info = {}
if 'wwpns' in connector and host_name:
target_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
init_targ_map = self._make_initiator_target_map(connector['wwpns'],
target_wwpns)
info = {'driver_volume_type': 'fibre_channel',
'data': {'initiator_target_map': init_targ_map}}
self._helpers.unmap_vol_from_host(vol_name, host_name)
LOG.debug('leave: terminate_connection: volume %(vol)s with '
'connector %(conn)s', {'vol': volume['id'],
'conn': connector})
return info
def create_volume(self, volume):
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
pool = self.configuration.storwize_svc_volpool_name
self._helpers.create_vdisk(volume['name'], str(volume['size']),
'gb', pool, opts)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
model_update = None
if 'replication' in opts and opts['replication']:
ctxt = context.get_admin_context()
model_update = self.replication.create_replica(ctxt, volume)
return model_update
def delete_volume(self, volume):
self._helpers.delete_vdisk(volume['name'], False)
if volume['id'] in self._vdiskcopyops:
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
def create_snapshot(self, snapshot):
ctxt = context.get_admin_context()
try:
source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
except Exception:
msg = (_('create_snapshot: get source volume failed.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
opts = self._get_vdisk_params(source_vol['volume_type_id'])
self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
snapshot['volume_id'], self.configuration,
opts, False)
def delete_snapshot(self, snapshot):
self._helpers.delete_vdisk(snapshot['name'], False)
def create_volume_from_snapshot(self, volume, snapshot):
if volume['size'] != snapshot['volume_size']:
msg = (_('create_volume_from_snapshot: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.InvalidInput(message=msg)
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
self._helpers.create_copy(snapshot['name'], volume['name'],
snapshot['id'], self.configuration,
opts, True)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
if 'replication' in opts and opts['replication']:
ctxt = context.get_admin_context()
replica_status = self.replication.create_replica(ctxt, volume)
if replica_status:
return replica_status
def create_cloned_volume(self, tgt_volume, src_volume):
if src_volume['size'] != tgt_volume['size']:
msg = (_('create_cloned_volume: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.InvalidInput(message=msg)
opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
volume_metadata=
tgt_volume.get('volume_metadata'))
self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
src_volume['id'], self.configuration,
opts, True)
if opts['qos']:
self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
if 'replication' in opts and opts['replication']:
ctxt = context.get_admin_context()
replica_status = self.replication.create_replica(ctxt, tgt_volume)
if replica_status:
return replica_status
def extend_volume(self, volume, new_size):
LOG.debug('enter: extend_volume: volume %s' % volume['id'])
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
allow_snaps=False)
if not ret:
msg = (_('extend_volume: Extending a volume with snapshots is not '
'supported.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
extend_amt = int(new_size) - volume['size']
self._helpers.extend_vdisk(volume['name'], extend_amt)
LOG.debug('leave: extend_volume: volume %s' % volume['id'])
def add_vdisk_copy(self, volume, dest_pool, vol_type):
return self._helpers.add_vdisk_copy(volume, dest_pool,
vol_type, self._state,
self.configuration)
def _add_vdisk_copy_op(self, ctxt, volume, new_op):
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
new_ops_list = curr_ops_list + [new_op]
else:
new_ops_list = [new_op]
new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
if volume['id'] in self._vdiskcopyops:
self._vdiskcopyops[volume['id']].append(new_op)
else:
self._vdiskcopyops[volume['id']] = [new_op]
# We added the first copy operation, so start the looping call
if len(self._vdiskcopyops) == 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
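# Illustrative note (not from the original source): with pending copy
# operations [('0', '1'), ('0', '2')] the admin metadata value stored
# under 'vdiskcopyops' becomes the string '0:1;0:2', which is exactly
# what the ';'/':' splitting above and in do_setup() parses back.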
def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
try:
self._vdiskcopyops[volume['id']].remove((orig_copy_id,
new_copy_id))
if not len(self._vdiskcopyops[volume['id']]):
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
except KeyError:
msg = (_('_rm_vdisk_copy_op: Volume %s does not have any '
'registered vdisk copy operations.') % volume['id'])
LOG.error(msg)
return
except ValueError:
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s does not have the '
'specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.')
% {'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
LOG.error(msg)
return
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if not curr_ops:
msg = (_('_rm_vdisk_copy_op: Volume metadata %s does not have any '
'registered vdisk copy operations.') % volume['id'])
LOG.error(msg)
return
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
try:
curr_ops_list.remove((orig_copy_id, new_copy_id))
except ValueError:
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s metadata does not '
'have the specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.')
% {'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
LOG.error(msg)
return
if len(curr_ops_list):
new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
else:
self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
'vdiskcopyops')
def promote_replica(self, ctxt, volume):
return self.replication.promote_replica(volume)
def reenable_replication(self, ctxt, volume):
return self.replication.reenable_replication(volume)
def create_replica_test_volume(self, tgt_volume, src_volume):
if src_volume['size'] != tgt_volume['size']:
msg = (_('create_cloned_volume: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.InvalidInput(message=msg)
replica_status = self.replication.test_replica(tgt_volume,
src_volume)
return replica_status
def get_replication_status(self, ctxt, volume):
replica_status = None
if self.replication:
replica_status = self.replication.get_replication_status(volume)
return replica_status
def _check_volume_copy_ops(self):
LOG.debug("enter: update volume copy status")
ctxt = context.get_admin_context()
copy_items = self._vdiskcopyops.items()
for vol_id, copy_ops in copy_items:
try:
volume = self.db.volume_get(ctxt, vol_id)
except Exception:
LOG.warn(_LW('Volume %s does not exist.'), vol_id)
del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
continue
for copy_op in copy_ops:
try:
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
copy_op[1])
except Exception:
msg = (_('_check_volume_copy_ops: Volume %(vol)s does not '
'have the specified vdisk copy operation: '
'orig=%(orig)s new=%(new)s.')
% {'vol': volume['id'], 'orig': copy_op[0],
'new': copy_op[1]})
LOG.info(msg)
else:
if synced:
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
copy_op[1])
LOG.debug("exit: update volume copy status")
def migrate_volume(self, ctxt, volume, host):
"""Migrate directly if source and dest are managed by same storage.
We create a new vdisk copy in the desired pool, and add the original
vdisk copy to the admin_metadata of the volume to be deleted. The
deletion will occur using a periodic task once the new copy is synced.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s' %
{'id': volume['id'], 'host': host['host']})
false_ret = (False, None)
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return false_ret
ctxt = context.get_admin_context()
if volume['volume_type_id'] is not None:
volume_type_id = volume['volume_type_id']
vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
else:
vol_type = None
self._check_volume_copy_ops()
new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
self._add_vdisk_copy_op(ctxt, volume, new_op)
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s' %
{'id': volume['id'], 'host': host['host']})
return (True, None)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
def retype_iogrp_property(volume, new, old):
if new != old:
self._helpers.change_vdisk_iogrp(volume['name'],
self._state, (new, old))
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
ignore_keys = ['protocol', 'multipath']
no_copy_keys = ['warning', 'autoexpand', 'easytier']
copy_keys = ['rsize', 'grainsize', 'compression']
all_keys = ignore_keys + no_copy_keys + copy_keys
old_opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
new_opts = self._get_vdisk_params(new_type['id'],
volume_type=new_type)
# Check if retype affects volume replication
model_update = None
old_type_replication = old_opts.get('replication', False)
new_type_replication = new_opts.get('replication', False)
# Delete replica if needed
if old_type_replication and not new_type_replication:
self.replication.delete_replica(volume)
model_update = {'replication_status': 'disabled',
'replication_driver_data': None,
'replication_extended_status': None}
vdisk_changes = []
need_copy = False
for key in all_keys:
if old_opts[key] != new_opts[key]:
if key in copy_keys:
need_copy = True
break
elif key in no_copy_keys:
vdisk_changes.append(key)
dest_location = host['capabilities'].get('location_info')
if self._stats['location_info'] != dest_location:
need_copy = True
if need_copy:
self._check_volume_copy_ops()
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return False
# If volume is replicated, can't copy
if new_type_replication:
msg = (_('Unable to retype: Current action needs volume-copy,'
' it is not allowed when new type is replication.'
' Volume = %s') % volume['id'])
raise exception.VolumeDriverException(message=msg)
retype_iogrp_property(volume,
new_opts['iogrp'],
old_opts['iogrp'])
try:
new_op = self.add_vdisk_copy(volume['name'],
dest_pool,
new_type)
self._add_vdisk_copy_op(ctxt, volume, new_op)
except exception.VolumeDriverException:
# roll back changing iogrp property
retype_iogrp_property(volume, old_opts['iogrp'],
new_opts['iogrp'])
msg = (_('Unable to retype: A copy of volume %s exists. '
'Retyping would exceed the limit of 2 copies.') %
volume['id'])
raise exception.VolumeDriverException(message=msg)
else:
retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp'])
self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
new_opts, self._state)
if new_opts['qos']:
# Add the new QoS setting to the volume. If the volume has an
# old QoS setting, it will be overwritten.
self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'])
elif old_opts['qos']:
# If the old_opts contain QoS keys, disable them.
self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos'])
# Add replica if needed
if not old_type_replication and new_type_replication:
model_update = self.replication.create_replica(ctxt, volume,
new_type)
LOG.debug('exit: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
return True, model_update
def manage_existing(self, volume, ref):
"""Manages an existing vdisk.
Renames the vdisk to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated -
if we got here then we have a vdisk that isn't in use (or we don't
care if it is in use).
"""
vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
if vdisk is None:
reason = (_('No vdisk with the UID specified by source-id %s.')
% ref['source-id'])
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
self._helpers.rename_vdisk(vdisk['name'], volume['name'])
def manage_existing_get_size(self, volume, ref):
"""Return size of an existing Vdisk for manage_existing.
existing_ref is a dictionary of the form:
{'source-id': <uid of disk>}
Optional elements are:
'manage_if_in_use': True/False (default is False)
If set to True, a volume will be managed even if it is currently
attached to a host system.
"""
# Check that the reference is valid
if 'source-id' not in ref:
reason = _('Reference must contain source-id element.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
# Check for existence of the vdisk
vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
if vdisk is None:
reason = (_('No vdisk with the UID specified by source-id %s.')
% (ref['source-id']))
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
# Check if the disk is in use, if we need to.
manage_if_in_use = ref.get('manage_if_in_use', False)
if (not manage_if_in_use and
self._helpers.is_vdisk_in_use(vdisk['name'])):
reason = _('The specified vdisk is mapped to a host.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return int(math.ceil(float(vdisk['capacity']) / units.Gi))
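# Illustrative example (not from the original source): a vdisk reporting
# a capacity of 5368709120 bytes (5 GiB) yields 5, while 5368709121 bytes
# is rounded up by math.ceil() to 6.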
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If we haven't gotten stats yet or 'refresh' is True,
update the stats first.
"""
if not self._stats or refresh:
self._update_volume_stats()
return self._stats
def create_consistencygroup(self, context, group):
"""Create a consistency group.
IBM Storwize does not create a CG until cg-snapshot creation;
the database maintains the volume-to-CG relationship.
"""
LOG.debug("Creating consistency group")
model_update = {'status': 'available'}
return model_update
def delete_consistencygroup(self, context, group):
"""Deletes a consistency group.
IBM Storwize will delete the volumes of the CG.
"""
LOG.debug("deleting consistency group")
model_update = {}
model_update['status'] = 'deleted'
volumes = self.db.volume_get_all_by_group(context, group['id'])
for volume in volumes:
try:
self._helpers.delete_vdisk(volume['name'], True)
volume['status'] = 'deleted'
except exception.VolumeBackendAPIException as err:
volume['status'] = 'error_deleting'
if model_update['status'] != 'error_deleting':
model_update['status'] = 'error_deleting'
LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
"Exception: %(exception)s."),
{'vol': volume['name'], 'exception': err})
return model_update, volumes
def create_cgsnapshot(self, ctxt, cgsnapshot):
"""Creates a cgsnapshot."""
# Use cgsnapshot id as cg name
cg_name = 'cg_snap-' + cgsnapshot['id']
# Create new cg as cg_snapshot
self._helpers.create_fc_consistgrp(cg_name)
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
ctxt, cgsnapshot['id'])
timeout = self.configuration.storwize_svc_flashcopy_timeout
model_update, snapshots_model = (
self._helpers.run_consistgrp_snapshots(cg_name,
snapshots,
self._state,
self.configuration,
timeout))
return model_update, snapshots_model
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a cgsnapshot."""
cgsnapshot_id = cgsnapshot['id']
cg_name = 'cg_snap-' + cgsnapshot_id
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
model_update, snapshots_model = (
self._helpers.delete_consistgrp_snapshots(cg_name,
snapshots))
return model_update, snapshots_model
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
data = {}
data['vendor_name'] = 'IBM'
data['driver_version'] = self.VERSION
data['storage_protocol'] = list(self._state['enabled_protocols'])
data['total_capacity_gb'] = 0 # To be overwritten
data['free_capacity_gb'] = 0 # To be overwritten
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
data['consistencygroup_support'] = True
pool = self.configuration.storwize_svc_volpool_name
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = '%s_%s' % (self._state['system_name'], pool)
data['volume_backend_name'] = backend_name
attributes = self._helpers.get_pool_attrs(pool)
if not attributes:
LOG.error(_LE('Could not get pool data from the storage'))
exception_message = (_('_update_volume_stats: '
'Could not get storage pool data'))
raise exception.VolumeBackendAPIException(data=exception_message)
data['total_capacity_gb'] = (float(attributes['capacity']) /
units.Gi)
data['free_capacity_gb'] = (float(attributes['free_capacity']) /
units.Gi)
data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
data['compression_support'] = self._state['compression_enabled']
data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
{'sys_id': self._state['system_id'],
'pool': pool})
if self.replication:
data.update(self.replication.get_replication_info())
self._stats = data
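# Illustrative example (not from the original source): a pool whose
# 'capacity' attribute is 10995116277760 bytes reports
# total_capacity_gb = 10240.0 after the division by units.Gi above.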
akaihola/bitcoin-price | old_versions/fast_dump_v11.py | mit

#!/usr/bin/env python3
import requests
import sys
def get_all():
page_num = 1
price_data = ''
while True:
req = requests.get("http://coinbase.com/api/v1/prices/historical?page="+str(page_num))
if req.status_code == 200:
price_data += '\n' + req.text
else:
price_data += "API error"
# Stop paginating on an API error instead of looping indefinitely.
break
print("... getting page "+str(page_num))
page_num += 1
if req.text == "":
break
return price_data
if __name__ == '__main__':
sys.stdout.write(get_all())
#with open('.tmp/{}_full_output.py'.format(int(time.time())), 'a') as f1:
# f1.write('\n'+ price_data)
#price_data_format1 = price_data.replace(',','\n')
#with open('.tmp/{}_lines_removed.py'.format(int(time.time())), 'a') as f2:
# f2.write('\n' + price_data_format1)
#price_data_format2 = price_data_format1.split('\n')
#with open('.tmp/{}_xyxy.py'.format(int(time.time())), 'a') as f3:
# f3.write(str(price_data_format2))
#prices = price_data_format2[::2]
#k=1
#with open('.tmp/{}_prices.py'.format(int(time.time())), 'a') as f4:
# while k<len(prices):
# f4.write('{!r}\n'.format(prices[k]))
# k+=1
#timestamps = price_data_format2[1::2]
#j=1
#with open('.tmp/{}_stamps.py'.format(int(time.time())), 'a') as f5:
# while j<len(timestamps):
# f5.write('{!r}\n'.format(timestamps[j]))
# j += 1
johankaito/fufuka | microblog/flask/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py

# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Implementation of the Metadata for Python packages PEPs.
Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental).
"""
from __future__ import unicode_literals
import codecs
from email import message_from_file
import json
import logging
import re
from . import DistlibException, __version__
from .compat import StringIO, string_types, text_type
from .markers import interpret
from .util import extract_by_key, get_extras
from .version import get_scheme, PEP440_VERSION_RE
logger = logging.getLogger(__name__)
class MetadataMissingError(DistlibException):
"""A required metadata is missing"""
class MetadataConflictError(DistlibException):
"""Attempt to read or write metadata fields that are conflictual."""
class MetadataUnrecognizedVersionError(DistlibException):
"""Unknown metadata version number."""
class MetadataInvalidError(DistlibException):
"""A metadata value is invalid"""
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# preferred version. Hopefully will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
_LINE_PREFIX = re.compile('\n \|')
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License')
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License', 'Classifier', 'Download-URL', 'Obsoletes',
'Provides', 'Requires')
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
'Download-URL')
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External')
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
'Obsoletes-Dist', 'Requires-External', 'Maintainer',
'Maintainer-email', 'Project-URL')
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External', 'Private-Version',
'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
'Provides-Extra')
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
'Setup-Requires-Dist', 'Extension')
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
if version == '1.0':
return _241_FIELDS
elif version == '1.1':
return _314_FIELDS
elif version == '1.2':
return _345_FIELDS
elif version == '2.0':
return _426_FIELDS
raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
"""Detect the best version depending on the fields used."""
def _has_marker(keys, markers):
for marker in markers:
if marker in keys:
return True
return False
keys = []
for key, value in fields.items():
if value in ([], 'UNKNOWN', None):
continue
keys.append(key)
possible_versions = ['1.0', '1.1', '1.2', '2.0']
# first let's try to see if a field is not part of one of the version
for key in keys:
if key not in _241_FIELDS and '1.0' in possible_versions:
possible_versions.remove('1.0')
if key not in _314_FIELDS and '1.1' in possible_versions:
possible_versions.remove('1.1')
if key not in _345_FIELDS and '1.2' in possible_versions:
possible_versions.remove('1.2')
if key not in _426_FIELDS and '2.0' in possible_versions:
possible_versions.remove('2.0')
# possible_version contains qualified versions
if len(possible_versions) == 1:
return possible_versions[0] # found !
elif len(possible_versions) == 0:
raise MetadataConflictError('Unknown metadata set')
# let's see if one unique marker is found
is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1:
raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields')
# we have the choice, 1.0, or 1.2, or 2.0
# - 1.0 has a broken Summary field but works with all tools
# - 1.1 is to avoid
# - 1.2 fixes Summary but has little adoption
# - 2.0 adds more features and is very new
if not is_1_1 and not is_1_2 and not is_2_0:
# we couldn't find any specific marker
if PKG_INFO_PREFERRED_VERSION in possible_versions:
return PKG_INFO_PREFERRED_VERSION
if is_1_1:
return '1.1'
if is_1_2:
return '1.2'
return '2.0'
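# Illustrative example (not from the original source): a mapping that only
# sets Name, Version and Summary matches every field list, carries no
# version-specific marker, and therefore falls back to
# PKG_INFO_PREFERRED_VERSION ('1.1'); adding a Requires-Dist entry narrows
# the candidates to 1.2/2.0 and _has_marker() then selects '1.2'.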
_ATTR2FIELD = {
'metadata_version': 'Metadata-Version',
'name': 'Name',
'version': 'Version',
'platform': 'Platform',
'supported_platform': 'Supported-Platform',
'summary': 'Summary',
'description': 'Description',
'keywords': 'Keywords',
'home_page': 'Home-page',
'author': 'Author',
'author_email': 'Author-email',
'maintainer': 'Maintainer',
'maintainer_email': 'Maintainer-email',
'license': 'License',
'classifier': 'Classifier',
'download_url': 'Download-URL',
'obsoletes_dist': 'Obsoletes-Dist',
'provides_dist': 'Provides-Dist',
'requires_dist': 'Requires-Dist',
'setup_requires_dist': 'Setup-Requires-Dist',
'requires_python': 'Requires-Python',
'requires_external': 'Requires-External',
'requires': 'Requires',
'provides': 'Provides',
'obsoletes': 'Obsoletes',
'project_url': 'Project-URL',
'private_version': 'Private-Version',
'obsoleted_by': 'Obsoleted-By',
'extension': 'Extension',
'provides_extra': 'Provides-Extra',
}
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python',)
_VERSION_FIELDS = ('Version',)
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
'Requires', 'Provides', 'Obsoletes-Dist',
'Provides-Dist', 'Requires-Dist', 'Requires-External',
'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
'Provides-Extra', 'Extension')
_LISTTUPLEFIELDS = ('Project-URL',)
_ELEMENTSFIELD = ('Keywords',)
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
_MISSING = object()
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
def _get_name_and_version(name, version, for_filename=False):
"""Return the distribution name with version.
If for_filename is true, return a filename-escaped form."""
if for_filename:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version)
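# Illustrative example (not from the original source):
#   _get_name_and_version('my lib', '1.0 beta', for_filename=True)
# returns 'my-lib-1.0.beta' -- runs of characters outside [A-Za-z0-9.]
# collapse to '-' and spaces in the version become '.'.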
class LegacyMetadata(object):
"""The legacy metadata of a release.
Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj* give a file-like object with metadata as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document the mapping API and UNKNOWN default key
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependencies = None
self.scheme = scheme
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, fileobj, name, value):
fileobj.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or
self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _remove_line_prefix(self, value):
return _LINE_PREFIX.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
#
# Public API
#
# dependencies = property(_get_dependencies, _set_dependencies)
def get_fullname(self, filesafe=False):
"""Return the distribution name with version.
If filesafe is true, return a filename-escaped form."""
return _get_name_and_version(self['Name'], self['Version'], filesafe)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
# When reading, get all the fields we can
for field in _ALL_FIELDS:
if field not in msg:
continue
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
they are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
'%r: %r is not valid (field %r)',
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
value = self._fields[name]
if isinstance(value, string_types):
return value.split(',')
return self._fields[name]
def check(self, strict=False):
"""Check if the metadata is compliant. If strict is True then raise if
no Name or Version are provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS,
scheme.is_valid_constraint_list),
(_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append('Wrong value for %r: %s' % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
"""
self.set_metadata_version()
mapping_1_0 = (
('metadata_version', 'Metadata-Version'),
('name', 'Name'),
('version', 'Version'),
('summary', 'Summary'),
('home_page', 'Home-page'),
('author', 'Author'),
('author_email', 'Author-email'),
('license', 'License'),
('description', 'Description'),
('keywords', 'Keywords'),
('platform', 'Platform'),
('classifier', 'Classifier'),
('download_url', 'Download-URL'),
)
data = {}
for key, field_name in mapping_1_0:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
if self['Metadata-Version'] == '1.2':
mapping_1_2 = (
('requires_dist', 'Requires-Dist'),
('requires_python', 'Requires-Python'),
('requires_external', 'Requires-External'),
('provides_dist', 'Provides-Dist'),
('obsoletes_dist', 'Obsoletes-Dist'),
('project_url', 'Project-URL'),
('maintainer', 'Maintainer'),
('maintainer_email', 'Maintainer-email'),
)
for key, field_name in mapping_1_2:
if not skip_missing or field_name in self._fields:
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
elif self['Metadata-Version'] == '1.1':
mapping_1_1 = (
('provides', 'Provides'),
('requires', 'Requires'),
('obsoletes', 'Obsoletes'),
)
for key, field_name in mapping_1_1:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
return data
def add_requirements(self, requirements):
if self['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in self:
del self[field]
self['Requires-Dist'] += requirements
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name,
self.version)
METADATA_FILENAME = 'pydist.json'
class Metadata(object):
"""
The metadata of a release. This implementation uses 2.0 (JSON)
metadata where possible. If not possible, it wraps a LegacyMetadata
instance which handles the key-value metadata format.
"""
METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$')
NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
VERSION_MATCHER = PEP440_VERSION_RE
SUMMARY_MATCHER = re.compile('.{1,2047}')
METADATA_VERSION = '2.0'
GENERATOR = 'distlib (%s)' % __version__
MANDATORY_KEYS = {
'name': (),
'version': (),
'summary': ('legacy',),
}
INDEX_KEYS = ('name version license summary description author '
'author_email keywords platform home_page classifiers '
'download_url')
DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
'dev_requires provides meta_requires obsoleted_by '
'supports_environments')
SYNTAX_VALIDATORS = {
'metadata_version': (METADATA_VERSION_MATCHER, ()),
'name': (NAME_MATCHER, ('legacy',)),
'version': (VERSION_MATCHER, ('legacy',)),
'summary': (SUMMARY_MATCHER, ('legacy',)),
}
__slots__ = ('_legacy', '_data', 'scheme')
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._legacy = None
self._data = None
self.scheme = scheme
if mapping is not None:
try:
self._validate_mapping(mapping, scheme)
self._data = mapping
except MetadataUnrecognizedVersionError:
self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
self.validate()
else:
data = None
if path:
with open(path, 'rb') as f:
data = f.read()
elif fileobj:
data = fileobj.read()
if data is None:
# Initialised with no args - to be added
self._data = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
else:
if not isinstance(data, text_type):
data = data.decode('utf-8')
try:
self._data = json.loads(data)
self._validate_mapping(self._data, scheme)
except ValueError:
# Note: MetadataUnrecognizedVersionError does not
# inherit from ValueError (it's a DistlibException,
# which should not inherit from ValueError).
# The ValueError comes from the json.load - if that
# succeeds and we get a validation error, we want
# that to propagate
self._legacy = LegacyMetadata(fileobj=StringIO(data),
scheme=scheme)
self.validate()
common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
none_list = (None, list)
none_dict = (None, dict)
mapped_keys = {
'run_requires': ('Requires-Dist', list),
'build_requires': ('Setup-Requires-Dist', list),
'dev_requires': none_list,
'test_requires': none_list,
'meta_requires': none_list,
'extras': ('Provides-Extra', list),
'modules': none_list,
'namespaces': none_list,
'exports': none_dict,
'commands': none_dict,
'classifiers': ('Classifier', list),
'source_url': ('Download-URL', None),
'metadata_version': ('Metadata-Version', None),
}
del none_list, none_dict
def __getattribute__(self, key):
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, maker = mapped[key]
if self._legacy:
if lk is None:
result = None if maker is None else maker()
else:
result = self._legacy.get(lk)
else:
value = None if maker is None else maker()
if key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
result = self._data.get(key, value)
else:
# special cases for PEP 459
sentinel = object()
result = sentinel
d = self._data.get('extensions')
if d:
if key == 'commands':
result = d.get('python.commands', value)
elif key == 'classifiers':
d = d.get('python.details')
if d:
result = d.get(key, value)
else:
d = d.get('python.exports')
if d:
result = d.get(key, value)
if result is sentinel:
result = value
elif key not in common:
result = object.__getattribute__(self, key)
elif self._legacy:
result = self._legacy.get(key)
else:
result = self._data.get(key)
return result
def _validate_value(self, key, value, scheme=None):
if key in self.SYNTAX_VALIDATORS:
pattern, exclusions = self.SYNTAX_VALIDATORS[key]
if (scheme or self.scheme) not in exclusions:
m = pattern.match(value)
if not m:
raise MetadataInvalidError('%r is an invalid value for '
'the %r property' % (value,
key))
def __setattr__(self, key, value):
self._validate_value(key, value)
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, _ = mapped[key]
if self._legacy:
if lk is None:
raise NotImplementedError
self._legacy[lk] = value
elif key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
self._data[key] = value
else:
# special cases for PEP 459
d = self._data.setdefault('extensions', {})
if key == 'commands':
d['python.commands'] = value
elif key == 'classifiers':
d = d.setdefault('python.details', {})
d[key] = value
else:
d = d.setdefault('python.exports', {})
d[key] = value
elif key not in common:
object.__setattr__(self, key, value)
else:
if key == 'keywords':
if isinstance(value, string_types):
value = value.strip()
if value:
value = value.split()
else:
value = []
if self._legacy:
self._legacy[key] = value
else:
self._data[key] = value
@property
def name_and_version(self):
return _get_name_and_version(self.name, self.version, True)
@property
def provides(self):
if self._legacy:
result = self._legacy['Provides-Dist']
else:
result = self._data.setdefault('provides', [])
s = '%s (%s)' % (self.name, self.version)
if s not in result:
result.append(s)
return result
@provides.setter
def provides(self, value):
if self._legacy:
self._legacy['Provides-Dist'] = value
else:
self._data['provides'] = value
def get_requirements(self, reqts, extras=None, env=None):
"""
Base method to get dependencies, given a set of extras
to satisfy and an optional environment context.
:param reqts: A list of sometimes-wanted dependencies,
perhaps dependent on extras and environment.
:param extras: A list of optional components being requested.
:param env: An optional environment for marker evaluation.
"""
if self._legacy:
result = reqts
else:
result = []
extras = get_extras(extras or [], self.extras)
for d in reqts:
if 'extra' not in d and 'environment' not in d:
# unconditional
include = True
else:
if 'extra' not in d:
# Not extra-dependent - only environment-dependent
include = True
else:
include = d.get('extra') in extras
if include:
# Not excluded because of extras, check environment
marker = d.get('environment')
if marker:
include = interpret(marker, env)
if include:
result.extend(d['requires'])
for key in ('build', 'dev', 'test'):
e = ':%s:' % key
if e in extras:
extras.remove(e)
# A recursive call, but it should terminate since 'test'
# has been removed from the extras
reqts = self._data.get('%s_requires' % key, [])
result.extend(self.get_requirements(reqts, extras=extras,
env=env))
return result
@property
def dictionary(self):
if self._legacy:
return self._from_legacy()
return self._data
@property
def dependencies(self):
if self._legacy:
raise NotImplementedError
else:
return extract_by_key(self._data, self.DEPENDENCY_KEYS)
@dependencies.setter
def dependencies(self, value):
if self._legacy:
raise NotImplementedError
else:
self._data.update(value)
def _validate_mapping(self, mapping, scheme):
if mapping.get('metadata_version') != self.METADATA_VERSION:
raise MetadataUnrecognizedVersionError()
missing = []
for key, exclusions in self.MANDATORY_KEYS.items():
if key not in mapping:
if scheme not in exclusions:
missing.append(key)
if missing:
msg = 'Missing metadata items: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for k, v in mapping.items():
self._validate_value(k, v, scheme)
def validate(self):
if self._legacy:
missing, warnings = self._legacy.check(True)
if missing or warnings:
logger.warning('Metadata: missing: %s, warnings: %s',
missing, warnings)
else:
self._validate_mapping(self._data, self.scheme)
def todict(self):
if self._legacy:
return self._legacy.todict(True)
else:
result = extract_by_key(self._data, self.INDEX_KEYS)
return result
def _from_legacy(self):
assert self._legacy and not self._data
result = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
lmd = self._legacy.todict(True) # skip missing ones
for k in ('name', 'version', 'license', 'summary', 'description',
'classifier'):
if k in lmd:
if k == 'classifier':
nk = 'classifiers'
else:
nk = k
result[nk] = lmd[k]
kw = lmd.get('Keywords', [])
if kw == ['']:
kw = []
result['keywords'] = kw
keys = (('requires_dist', 'run_requires'),
('setup_requires_dist', 'build_requires'))
for ok, nk in keys:
if ok in lmd and lmd[ok]:
result[nk] = [{'requires': lmd[ok]}]
result['provides'] = self.provides
author = {}
maintainer = {}
return result
LEGACY_MAPPING = {
'name': 'Name',
'version': 'Version',
'license': 'License',
'summary': 'Summary',
'description': 'Description',
'classifiers': 'Classifier',
}
def _to_legacy(self):
def process_entries(entries):
reqts = set()
for e in entries:
extra = e.get('extra')
env = e.get('environment')
rlist = e['requires']
for r in rlist:
if not env and not extra:
reqts.add(r)
else:
marker = ''
if extra:
marker = 'extra == "%s"' % extra
if env:
if marker:
marker = '(%s) and %s' % (env, marker)
else:
marker = env
reqts.add(';'.join((r, marker)))
return reqts
assert self._data and not self._legacy
result = LegacyMetadata()
nmd = self._data
for nk, ok in self.LEGACY_MAPPING.items():
if nk in nmd:
result[ok] = nmd[nk]
r1 = process_entries(self.run_requires + self.meta_requires)
r2 = process_entries(self.build_requires + self.dev_requires)
if self.extras:
result['Provides-Extra'] = sorted(self.extras)
result['Requires-Dist'] = sorted(r1)
result['Setup-Requires-Dist'] = sorted(r2)
# TODO: other fields such as contacts
return result
def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
if [path, fileobj].count(None) != 1:
raise ValueError('Exactly one of path and fileobj is needed')
self.validate()
if legacy:
if self._legacy:
legacy_md = self._legacy
else:
legacy_md = self._to_legacy()
if path:
legacy_md.write(path, skip_unknown=skip_unknown)
else:
legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
else:
if self._legacy:
d = self._from_legacy()
else:
d = self._data
if fileobj:
json.dump(d, fileobj, ensure_ascii=True, indent=2,
sort_keys=True)
else:
with codecs.open(path, 'w', 'utf-8') as f:
json.dump(d, f, ensure_ascii=True, indent=2,
sort_keys=True)
def add_requirements(self, requirements):
if self._legacy:
self._legacy.add_requirements(requirements)
else:
run_requires = self._data.setdefault('run_requires', [])
always = None
for entry in run_requires:
if 'environment' not in entry and 'extra' not in entry:
always = entry
break
if always is None:
always = { 'requires': requirements }
run_requires.insert(0, always)
else:
rset = set(always['requires']) | set(requirements)
always['requires'] = sorted(rset)
def __repr__(self):
name = self.name or '(no name)'
version = self.version or 'no version'
return '<%s %s %s (%s)>' % (self.__class__.__name__,
self.metadata_version, name, version)
| apache-2.0 | 1,468,120,979,943,838,700 | 35.213611 | 79 | 0.523151 | false |
henriquefacioli/gd-ae- | gda/admin.py | 1 | 1632 | from django.contrib import admin
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from import_export.admin import ImportExportActionModelAdmin
from gda.models import Questionnaire, Question, Choice, Answer
## Questionnaires
# Class to import and export Questionnaire
class QuestionnaireResource(resources.ModelResource):
class Meta:
model = Questionnaire
class QuestionnaireAdmin(ImportExportModelAdmin):
resource_class = QuestionnaireResource
list_display = ("pk", "name")
search_fields = ('id','name')
ordering = ['id']
admin.site.register(Questionnaire, QuestionnaireAdmin)
## Questions
# Class to import and export Questions
class QuestionResource(resources.ModelResource):
class Meta:
model = Question
class QuestionAdmin(ImportExportModelAdmin):
resource_class = QuestionResource
search_fields = ('text','type')
ordering = ['id']
admin.site.register(Question, QuestionAdmin)
## Choices
class ChoiceResource(resources.ModelResource):
class Meta:
model = Choice
class ChoiceAdmin(ImportExportModelAdmin):
resource_class = ChoiceResource
search_fields = ('id','text')
ordering = ['id']
admin.site.register(Choice, ChoiceAdmin)
## Answers
class AnswerResource(resources.ModelResource):
class Meta:
model = Answer
class AnswerAdmin(ImportExportModelAdmin):
resource_class = AnswerResource
list_display = ("question",)
search_fields = ('offering__subject__code',
'question__id')
ordering = ['question']
admin.site.register(Answer, AnswerAdmin)
| gpl-3.0 | 6,312,490,031,681,824,000 | 23.358209 | 62 | 0.729779 | false |
hutchison/bp_mgmt | bp_cupid/tests/test_login.py | 1 | 1795 | from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from django.contrib.auth.models import User
class TestLogin(StaticLiveServerTestCase):
def setUp(self):
self.username = 'alice'
self.email = '[email protected]'
self.password = 'test'
User.objects.create_user(self.username, self.email, self.password)
self.browser = webdriver.Firefox()
#self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_successful_login(self):
self.browser.get(self.live_server_url)
self.browser.find_element_by_link_text('Login').click()
input_username = self.browser.find_element_by_id('id_username')
input_username.send_keys(self.username)
input_password = self.browser.find_element_by_id('id_password')
input_password.send_keys(self.password)
self.browser.find_element_by_css_selector('[type=submit]').click()
self.assertIsNotNone(self.browser.find_element_by_id('logout'))
def test_failing_login(self):
self.browser.get(self.live_server_url)
self.browser.find_element_by_link_text('Login').click()
input_username = self.browser.find_element_by_id('id_username')
input_username.send_keys(self.username)
input_password = self.browser.find_element_by_id('id_password')
input_password.send_keys('foobar')
self.browser.find_element_by_css_selector('[type=submit]').click()
alert = self.browser.find_element_by_class_name('alert-danger')
self.assertEqual(
alert.text,
'Bitte einen gültigen Benutzername und ein Passwort eingeben. Beide Felder berücksichtigen die Groß-/Kleinschreibung.'
)
| agpl-3.0 | 3,040,446,276,414,484,500 | 36.333333 | 130 | 0.677455 | false |
bbannier/ROOT | interpreter/llvm/src/tools/clang/bindings/python/tests/cindex/util.py | 12 | 2609 | # This file provides common utility functions for the test suite.
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
def get_tu(source, lang='c', all_warnings=False, flags=[]):
"""Obtain a translation unit from source and language.
By default, the translation unit is created from source file "t.<ext>"
where <ext> is the default file extension for the specified language. By
default it is C, so "t.c" is the default file name.
Supported languages are {c, cpp, objc}.
all_warnings is a convenience argument to enable all compiler warnings.
"""
args = list(flags)
name = 't.c'
if lang == 'cpp':
name = 't.cpp'
args.append('-std=c++11')
elif lang == 'objc':
name = 't.m'
elif lang != 'c':
raise Exception('Unknown language: %s' % lang)
if all_warnings:
args += ['-Wall', '-Wextra']
return TranslationUnit.from_source(name, args, unsaved_files=[(name,
source)])
def get_cursor(source, spelling):
"""Obtain a cursor from a source object.
This provides a convenient search mechanism to find a cursor with specific
spelling within a source. The first argument can be either a
TranslationUnit or Cursor instance.
If the cursor is not found, None is returned.
"""
children = []
if isinstance(source, Cursor):
children = source.get_children()
else:
# Assume TU
children = source.cursor.get_children()
for cursor in children:
if cursor.spelling == spelling:
return cursor
# Recurse into children.
result = get_cursor(cursor, spelling)
if result is not None:
return result
return None
def get_cursors(source, spelling):
"""Obtain all cursors from a source object with a specific spelling.
This provides a convenient search mechanism to find all cursors with specific
spelling within a source. The first argument can be either a
TranslationUnit or Cursor instance.
If no cursors are found, an empty list is returned.
"""
cursors = []
children = []
if isinstance(source, Cursor):
children = source.get_children()
else:
# Assume TU
children = source.cursor.get_children()
for cursor in children:
if cursor.spelling == spelling:
cursors.append(cursor)
# Recurse into children.
cursors.extend(get_cursors(cursor, spelling))
return cursors
__all__ = [
'get_cursor',
'get_cursors',
'get_tu',
]
| lgpl-2.1 | 951,695,942,087,871,500 | 27.053763 | 81 | 0.63051 | false |
StephaneP/volatility | volatility/conf.py | 57 | 15263 | ## This file was taken from PyFlag http://www.pyflag.net/
# Michael Cohen <[email protected]>
# David Collett <[email protected]>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ******************************************************
#pylint: disable-msg=C0111
""" Configuration modules for pyflag.
PyFlag is a complex package and requires a flexible configuration
system. The following are the requirements of the configuration
system:
1) Configuration must be available from a number of sources:
- Autoconf must be able to set things like the python path (in case
pyflag is installed to a different prefix)
- Users must be able to configure the installed system for their
specific requirements.
- Unconfigured parameters must be resolved at run time through the
GUI and saved.
2) Configuration must be able to apply to cases specifically.
3) Because pyflag is modular, configuration variables might be required
for each module. This means that definitions and declarations of
configuration variables must be distributed in each plugin.
These goals are achieved by the use of multiple sources of
configuration information:
- The system wide configuration file is this file: conf.py. It is
generated by the build system from conf.py.in by substituting
autoconfigured variables into it. It contains the most basic
settings related to the installation, e.g. which python interpreter
is used, where the python modules are installed etc. In particular
it refers to the location of the system configuration file (usually
found in /usr/local/etc/pyflagrc, or in /etc/pyflagrc).
- The sysconfig file contains things like where the upload
directory is, where to store temporary files etc. These are mainly
installation wide settings which are expected to be modified by the
administrator. Note that if you want the GUI to manipulate this
file it needs to be writable by the user running the GUI.
- Finally a conf table in each case is used to provide a per case
configuration
"""
import ConfigParser
import optparse
import os
import sys
default_config = "/etc/volatilityrc"
class PyFlagOptionParser(optparse.OptionParser):
final = False
help_hooks = []
def _process_args(self, largs, rargs, values):
try:
return optparse.OptionParser._process_args(self, largs, rargs, values)
except (optparse.BadOptionError, optparse.OptionValueError), err:
if self.final:
raise err
def error(self, msg):
## We can't emit errors about missing parameters until we are
## sure that all modules have registered all their parameters
if self.final:
return optparse.OptionParser.error(self, msg)
else:
raise RuntimeError(msg)
def print_help(self, file = sys.stdout):
optparse.OptionParser.print_help(self, file)
for cb in self.help_hooks:
file.write(cb())
class ConfObject(object):
""" This is a singleton class to manage the configuration.
This means it can be instantiated many times, but each instance
refers to the global configuration (which is set in class
variables).
NOTE: The class attributes have static dicts assigned to
facilitate singleton behaviour. This means all future instances
will have the same dicts.
"""
optparser = PyFlagOptionParser(add_help_option = False,
version = False,
)
initialised = False
## This is the globals dictionary which will be used for
## evaluating the configuration directives.
g_dict = dict(__builtins__ = None)
## These are the options derived by reading any config files
cnf_opts = {}
## Command line opts
opts = {}
args = None
default_opts = {}
docstrings = {}
## These are the actual options returned by the optparser:
optparse_opts = None
## Filename where the configuration file is:
_filename = None
_filenames = []
## These parameters can not be updated by the GUI (but will be
## propagated into new configuration files)
readonly = {}
## Absolute parameters can only be set by the code or command
## lines; they cannot be overridden in the configuration
## file. This ensures that configuration files don't mask new
## options (e.g. schema version)
_absolute = {}
## A list of option names:
options = []
## Cache variants: There are configuration options which
## encapsulate the state of the running program. If any of these
## change all caches will be invalidated.
cache_invalidators = {}
def __init__(self):
""" This is a singleton object kept in the class """
if not ConfObject.initialised:
self.optparser.add_option("-h", "--help", action = "store_true", default = False,
help = "list all available options and their default values. Default values may be set in the configuration file (" + default_config + ")")
ConfObject.initialised = True
def set_usage(self, usage = None, version = None):
if usage:
self.optparser.set_usage(usage)
if version:
self.optparser.version = version
def add_file(self, filename, _type = 'init'):
""" Adds a new file to parse """
self._filenames.append(filename)
self.cnf_opts.clear()
for f in self._filenames:
try:
conf_parser = ConfigParser.ConfigParser()
conf_parser.read(f)
for k, v in conf_parser.items('DEFAULT'):
## Absolute parameters are protected from
## configuration files:
if k in self._absolute.keys():
continue
try:
v = eval(v, self.g_dict)
except Exception, _e:
pass
## update the configured options
self.cnf_opts[k] = v
except IOError:
print "Unable to open {0}".format(f)
ConfObject._filename = filename
def print_help(self):
return self.optparser.print_help()
def add_help_hook(self, cb):
""" Adds an epilog to the help message """
self.optparser.help_hooks.append(cb)
def set_help_hook(self, cb):
self.optparser.help_hooks = [cb]
def parse_options(self, final = True):
""" Parses the options from command line and any conf files
currently added.
The final parameter should be only called from main programs
at the point where they are prepared for us to call exit if
required; (For example when we detect the -h parameter).
"""
self.optparser.final = final
## Parse the command line options:
try:
(opts, args) = self.optparser.parse_args()
self.opts.clear()
## Update our cmdline dict:
for k in dir(opts):
v = getattr(opts, k)
if k in self.options and not v == None:
self.opts[k] = v
except UnboundLocalError:
raise RuntimeError("Unknown option - use -h to see help")
## If error() was called we catch it here
except RuntimeError:
opts = {}
## This gives us as much as was parsed so far
args = self.optparser.largs
self.optparse_opts = opts
self.args = args
if final:
## Reparse the config file again:
self.add_file(self._filename)
try:
## Help can only be set on the command line
if getattr(self.optparse_opts, "help"):
## Populate the metavars with the default values:
for opt in self.optparser.option_list:
try:
opt.metavar = "{0}".format((getattr(self, opt.dest) or
opt.dest.upper()))
except Exception, _e:
pass
self.optparser.print_help()
sys.exit(0)
except AttributeError:
pass
## Set the cache invalidators on the cache now:
import volatility.cache as cache
for k, v in self.cache_invalidators.items():
cache.CACHE.invalidate_on(k, v)
def remove_option(self, option):
""" Removes options both from the config file parser and the
command line parser
This should only by used on options *before* they have been read,
otherwise things could get very confusing.
"""
option = option.lower()
if option in self.cache_invalidators:
del self.cache_invalidators[option]
normalized_option = option.replace("-", "_")
if normalized_option not in self.options:
return
self.options.remove(normalized_option)
if normalized_option in self.readonly:
del self.readonly[normalized_option]
if normalized_option in self.default_opts:
del self.default_opts[normalized_option]
if normalized_option in self._absolute:
del self._absolute[normalized_option]
del self.docstrings[normalized_option]
self.optparser.remove_option("--{0}".format(option))
try:
self.parse_options(False)
except AttributeError:
pass
def add_option(self, option, short_option = None,
cache_invalidator = True,
**args):
""" Adds options both to the config file parser and the
command line parser.
Args:
option: The long option name.
short_option: An optional short option.
cache_invalidator: If set, when this option
changes all caches are invalidated.
"""
option = option.lower()
if cache_invalidator:
self.cache_invalidators[option] = lambda : self.get_value(option)
normalized_option = option.replace("-", "_")
if normalized_option in self.options:
return
self.options.append(normalized_option)
## If this is read only we store it in a special dict
try:
if args['readonly']:
self.readonly[normalized_option] = args['default']
del args['readonly']
except KeyError:
pass
## If there is a default specified, we update our defaults dict:
try:
default = args['default']
try:
default = eval(default, self.g_dict)
except:
pass
self.default_opts[normalized_option] = default
del args['default']
except KeyError:
pass
try:
self._absolute[normalized_option] = args['absolute']
del args['absolute']
except KeyError:
pass
self.docstrings[normalized_option] = args.get('help', None)
if short_option:
self.optparser.add_option("-{0}".format(short_option), "--{0}".format(option), **args)
else:
self.optparser.add_option("--{0}".format(option), **args)
## update the command line parser
## We have to do the try-catch for python 2.4 support of short
## arguments. It can be removed when python 2.5 is a requirement
try:
self.parse_options(False)
except AttributeError:
pass
def update(self, key, value):
""" This can be used by scripts to force a value of an option """
self.readonly[key.lower()] = value
def get_value(self, key):
return getattr(self, key.replace("-", "_"))
def __getattr__(self, attr):
## If someone is looking for a configuration parameter but
## we have not parsed anything yet - do so now.
if self.opts is None:
self.parse_options(False)
## Maybe its a class method?
try:
return super(ConfObject, self).__getattribute__(attr)
except AttributeError:
pass
## Is it a ready only parameter (i.e. can not be overridden by
## the config file)
try:
return self.readonly[attr.lower()]
except KeyError:
pass
## Try to find the attribute in the command line options:
try:
return self.opts[attr.lower()]
except KeyError:
pass
## Has it already been parsed?
try:
tmp = getattr(self.optparser.values, attr.lower())
if tmp:
return tmp
except AttributeError:
pass
## Was it given in the environment?
try:
return os.environ["VOLATILITY_" + attr.upper()]
except KeyError:
pass
## No - try the configuration file:
try:
return self.cnf_opts[attr.lower()]
except KeyError:
pass
## No - is there a default for it?
try:
return self.default_opts[attr.lower()]
except KeyError:
pass
## Maybe its just a command line option:
try:
if not attr.startswith("_") and self.optparse_opts:
return getattr(self.optparse_opts, attr.lower())
except AttributeError:
pass
raise AttributeError("Parameter {0} is not configured - try setting it on the command line (-h for help)".format(attr))
class DummyConfig(ConfObject):
pass
config = ConfObject()
if os.access(default_config, os.R_OK):
config.add_file(default_config)
else:
config.add_file("volatilityrc")
default_conf_path = ".volatilityrc"
try:
default_conf_path = os.environ['HOME'] + '/.volatilityrc'
except KeyError:
pass
config.add_option("CONF-FILE", default = default_conf_path,
cache_invalidator = False,
help = "User based configuration file")
config.add_file(config.CONF_FILE)
| gpl-2.0 | -8,215,620,541,887,306,000 | 32.036797 | 167 | 0.594379 | false |
pincopallino93/rdfendpoints | lib/rdflib/plugins/parsers/pyRdfa/rdfs/__init__.py | 25 | 1729 | # -*- coding: utf-8 -*-
"""
Separate module to handle vocabulary expansions. The L{cache} module takes care of caching vocabulary graphs; the L{process}
module takes care of the expansion itself.
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: __init__.py,v 1.4 2012/08/20 13:15:28 ivan Exp $ $Date: 2012/08/20 13:15:28 $
"""
import sys
import os
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
from rdflib import Graph
else :
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Graph import Graph
from .. import RDFaError, pyRdfaError
from .. import ns_rdfa, ns_xsd, ns_distill
VocabCachingInfo = ns_distill["VocabCachingInfo"]
# Error message texts
err_outdated_cache = "Vocab document <%s> could not be dereferenced; using possibly outdated cache"
err_unreachable_vocab = "Vocab document <%s> could not be dereferenced"
err_unparsable_Turtle_vocab = "Could not parse vocab in Turtle at <%s> (%s)"
err_unparsable_xml_vocab = "Could not parse vocab in RDF/XML at <%s> (%s)"
err_unparsable_ntriples_vocab = "Could not parse vocab in N-Triple at <%s> (%s)"
err_unparsable_rdfa_vocab = "Could not parse vocab in RDFa at <%s> (%s)"
err_unrecognised_vocab_type = "Unrecognized media type for the vocab file <%s>: '%s'"
| apache-2.0 | -5,029,061,760,942,173,000 | 34.265306 | 124 | 0.725116 | false |
chispita/epiwork | apps/survey/migrations/0005_verify_single_user_assumption.py | 4 | 12061 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for survey_user in orm['survey.SurveyUser'].objects.all():
assert survey_user.user.count() <= 1, survey_user.global_id
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.extraresponse': {
'Meta': {'object_name': 'ExtraResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.lastresponse': {
'Meta': {'object_name': 'LastResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'})
},
'survey.localflusurvey': {
'Meta': {'object_name': 'LocalFluSurvey'},
'age_user': ('django.db.models.fields.SmallIntegerField', [], {}),
'data': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.localprofile': {
'Meta': {'object_name': 'LocalProfile'},
'a_family': ('django.db.models.fields.SmallIntegerField', [], {}),
'a_smoker': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_current': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_seasonal': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_swine': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'birth_date': ('django.db.models.fields.DateField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'sq_date_first': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_date_last': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_num_season': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'sq_num_total': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
'survey.localresponse': {
'Meta': {'object_name': 'LocalResponse'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.participation': {
'Meta': {'object_name': 'Participation'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'epidb_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'previous_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'previous_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Survey']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.profile': {
'Meta': {'object_name': 'Profile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Survey']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.profilesendqueue': {
'Meta': {'object_name': 'ProfileSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.responsesendqueue': {
'Meta': {'object_name': 'ResponseSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specification': ('django.db.models.fields.TextField', [], {}),
'survey_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'survey.surveyuser': {
'Meta': {'object_name': 'SurveyUser'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'global_id': ('django.db.models.fields.CharField', [], {'default': "'ccb466d8-5d2d-488f-b539-5d077b609db7'", 'unique': 'True', 'max_length': '36'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'last_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
}
}
complete_apps = ['survey']
| agpl-3.0 | -6,844,392,325,395,507,000 | 72.09697 | 182 | 0.546306 | false |
harmy/kbengine | kbe/src/lib/python/Lib/test/test_plistlib.py | 55 | 7809 | # Copyright (C) 2003 Python Software Foundation
import unittest
import plistlib
import os
import datetime
from test import support
# This test data was generated through Cocoa's NSDictionary class
TESTDATA = b"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" \
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>aDate</key>
<date>2004-10-26T10:33:33Z</date>
<key>aDict</key>
<dict>
<key>aFalseValue</key>
<false/>
<key>aTrueValue</key>
<true/>
<key>aUnicodeValue</key>
<string>M\xc3\xa4ssig, Ma\xc3\x9f</string>
<key>anotherString</key>
<string><hello & 'hi' there!></string>
<key>deeperDict</key>
<dict>
<key>a</key>
<integer>17</integer>
<key>b</key>
<real>32.5</real>
<key>c</key>
<array>
<integer>1</integer>
<integer>2</integer>
<string>text</string>
</array>
</dict>
</dict>
<key>aFloat</key>
<real>0.5</real>
<key>aList</key>
<array>
<string>A</string>
<string>B</string>
<integer>12</integer>
<real>32.5</real>
<array>
<integer>1</integer>
<integer>2</integer>
<integer>3</integer>
</array>
</array>
<key>aString</key>
<string>Doodah</string>
<key>anInt</key>
<integer>728</integer>
<key>nestedData</key>
<array>
<data>
PGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5r
PgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5
IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBi
aW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3Rz
IG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQID
PGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAw==
</data>
</array>
<key>someData</key>
<data>
PGJpbmFyeSBndW5rPg==
</data>
<key>someMoreData</key>
<data>
PGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8
bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxs
b3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxv
dHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90
cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAw==
</data>
<key>\xc3\x85benraa</key>
<string>That was a unicode key.</string>
</dict>
</plist>
""".replace(b" " * 8, b"\t") # Apple as well as plistlib.py output hard tabs
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(support.TESTFN)
except:
pass
def _create(self):
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.5, [1, 2, 3]],
aFloat = 0.5,
anInt = 728,
aDict=dict(
anotherString="<hello & 'hi' there!>",
aUnicodeValue='M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
),
someData = plistlib.Data(b"<binary gunk>"),
someMoreData = plistlib.Data(b"<lots of binary gunk>\0\1\2\3" * 10),
nestedData = [plistlib.Data(b"<lots of binary gunk>\0\1\2\3" * 10)],
aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
)
pl['\xc5benraa'] = "That was a unicode key."
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
plistlib.writePlist(pl, support.TESTFN)
pl2 = plistlib.readPlist(support.TESTFN)
self.assertEqual(dict(pl), dict(pl2))
def test_bytes(self):
pl = self._create()
data = plistlib.writePlistToBytes(pl)
pl2 = plistlib.readPlistFromBytes(data)
self.assertEqual(dict(pl), dict(pl2))
data2 = plistlib.writePlistToBytes(pl2)
self.assertEqual(data, data2)
def test_appleformatting(self):
pl = plistlib.readPlistFromBytes(TESTDATA)
data = plistlib.writePlistToBytes(pl)
self.assertEqual(data, TESTDATA,
"generated data was not identical to Apple's output")
def test_appleformattingfromliteral(self):
pl = self._create()
pl2 = plistlib.readPlistFromBytes(TESTDATA)
self.assertEqual(dict(pl), dict(pl2),
"generated data was not identical to Apple's output")
def test_bytesio(self):
from io import BytesIO
b = BytesIO()
pl = self._create()
plistlib.writePlist(pl, b)
pl2 = plistlib.readPlist(BytesIO(b.getvalue()))
self.assertEqual(dict(pl), dict(pl2))
def test_controlcharacters(self):
for i in range(128):
c = chr(i)
testString = "string containing %s" % c
if i >= 32 or c in "\r\n\t":
# \r, \n and \t are the only legal control chars in XML
plistlib.writePlistToBytes(testString)
else:
self.assertRaises(ValueError,
plistlib.writePlistToBytes,
testString)
def test_nondictroot(self):
test1 = "abc"
test2 = [1, 2, 3, "abc"]
result1 = plistlib.readPlistFromBytes(plistlib.writePlistToBytes(test1))
result2 = plistlib.readPlistFromBytes(plistlib.writePlistToBytes(test2))
self.assertEqual(test1, result1)
self.assertEqual(test2, result2)
def test_invalidarray(self):
for i in ["<key>key inside an array</key>",
"<key>key inside an array2</key><real>3</real>",
"<true/><key>key inside an array3</key>"]:
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
("<plist><array>%s</array></plist>"%i).encode())
def test_invaliddict(self):
for i in ["<key><true/>k</key><string>compound key</string>",
"<key>single key</key>",
"<string>missing key</string>",
"<key>k1</key><string>v1</string><real>5.3</real>"
"<key>k1</key><key>k2</key><string>double key</string>"]:
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
("<plist><dict>%s</dict></plist>"%i).encode())
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
("<plist><array><dict>%s</dict></array></plist>"%i).encode())
def test_invalidinteger(self):
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
b"<plist><integer>not integer</integer></plist>")
def test_invalidreal(self):
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
b"<plist><integer>not real</integer></plist>")
def test_main():
support.run_unittest(TestPlistlib)
if __name__ == '__main__':
test_main()
| lgpl-3.0 | 3,728,647,697,944,552,400 | 36.185714 | 91 | 0.543347 | false |
srm912/servo | tests/wpt/harness/wptrunner/metadata.py | 78 | 12836 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import shutil
import sys
import tempfile
import types
import uuid
from collections import defaultdict
from mozlog import reader
from mozlog import structuredlog
import expected
import manifestupdate
import testloader
import wptmanifest
import wpttest
from vcs import git
manifest = None # Module that will be imported relative to test_root
logger = structuredlog.StructuredLogger("web-platform-tests")
def load_test_manifests(serve_root, test_paths):
do_delayed_imports(serve_root)
manifest_loader = testloader.ManifestLoader(test_paths, False)
return manifest_loader.load()
def update_expected(test_paths, serve_root, log_file_names,
rev_old=None, rev_new="HEAD", ignore_existing=False,
sync_root=None, property_order=None, boolean_properties=None):
"""Update the metadata files for web-platform-tests based on
the results obtained in a previous run"""
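# Minimal call sketch (paths and the log file name are placeholders, not
# taken from a real configuration):
# changed = update_expected(test_paths, serve_root, ["wpt_run.log"])
# returns the test files whose results changed without a corresponding
# change in the synced tree.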
manifests = load_test_manifests(serve_root, test_paths)
change_data = {}
if sync_root is not None:
if rev_old is not None:
rev_old = git("rev-parse", rev_old, repo=sync_root).strip()
rev_new = git("rev-parse", rev_new, repo=sync_root).strip()
if rev_old is not None:
change_data = load_change_data(rev_old, rev_new, repo=sync_root)
expected_map_by_manifest = update_from_logs(manifests,
*log_file_names,
ignore_existing=ignore_existing,
property_order=property_order,
boolean_properties=boolean_properties)
for test_manifest, expected_map in expected_map_by_manifest.iteritems():
url_base = manifests[test_manifest]["url_base"]
metadata_path = test_paths[url_base]["metadata_path"]
write_changes(metadata_path, expected_map)
results_changed = [item.test_path for item in expected_map.itervalues() if item.modified]
return unexpected_changes(manifests, change_data, results_changed)
def do_delayed_imports(serve_root):
global manifest
from manifest import manifest
def files_in_repo(repo_root):
return git("ls-tree", "-r", "--name-only", "HEAD").split("\n")
def rev_range(rev_old, rev_new, symmetric=False):
joiner = ".." if not symmetric else "..."
return "".join([rev_old, joiner, rev_new])
def paths_changed(rev_old, rev_new, repo):
data = git("diff", "--name-status", rev_range(rev_old, rev_new), repo=repo)
lines = [tuple(item.strip() for item in line.strip().split("\t", 1))
for line in data.split("\n") if line.strip()]
output = set(lines)
return output
def load_change_data(rev_old, rev_new, repo):
changes = paths_changed(rev_old, rev_new, repo)
rv = {}
status_keys = {"M": "modified",
"A": "new",
"D": "deleted"}
# TODO: deal with renames
for item in changes:
rv[item[1]] = status_keys[item[0]]
return rv
def unexpected_changes(manifests, change_data, files_changed):
files_changed = set(files_changed)
root_manifest = None
for manifest, paths in manifests.iteritems():
if paths["url_base"] == "/":
root_manifest = manifest
break
else:
return []
rv = []
return [fn for fn, tests in root_manifest if fn in files_changed and change_data.get(fn) != "M"]
# For each testrun
# Load all files and scan for the suite_start entry
# Build a hash of filename: properties
# For each different set of properties, gather all chunks
# For each chunk in the set of chunks, go through all tests
# for each test, make a map of {conditionals: [(platform, new_value)]}
# Repeat for each platform
# For each test in the list of tests:
# for each conditional:
# If all the new values match (or there aren't any) retain that conditional
# If any new values mismatch mark the test as needing human attention
# Check if all the RHS values are the same; if so collapse the conditionals
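# Sketch of how the steps above are typically driven (log filenames are
# illustrative only):
# expected_map = update_from_logs(manifests,
# "chunk-1.log", "chunk-2.log",
# ignore_existing=False)
# Each per-manifest expected map in the returned dict is then written out
# with write_changes(), as update_expected() does above.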
def update_from_logs(manifests, *log_filenames, **kwargs):
ignore_existing = kwargs.get("ignore_existing", False)
property_order = kwargs.get("property_order")
boolean_properties = kwargs.get("boolean_properties")
expected_map = {}
id_test_map = {}
for test_manifest, paths in manifests.iteritems():
expected_map_manifest, id_path_map_manifest = create_test_tree(
paths["metadata_path"],
test_manifest,
property_order=property_order,
boolean_properties=boolean_properties)
expected_map[test_manifest] = expected_map_manifest
id_test_map.update(id_path_map_manifest)
updater = ExpectedUpdater(manifests, expected_map, id_test_map,
ignore_existing=ignore_existing)
for log_filename in log_filenames:
with open(log_filename) as f:
updater.update_from_log(f)
for manifest_expected in expected_map.itervalues():
for tree in manifest_expected.itervalues():
for test in tree.iterchildren():
for subtest in test.iterchildren():
subtest.coalesce_expected()
test.coalesce_expected()
return expected_map
def directory_manifests(metadata_path):
rv = []
for dirpath, dirname, filenames in os.walk(metadata_path):
if "__dir__.ini" in filenames:
rel_path = os.path.relpath(dirpath, metadata_path)
rv.append(os.path.join(rel_path, "__dir__.ini"))
return rv
def write_changes(metadata_path, expected_map):
# First write the new manifest files to a temporary directory
temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
write_new_expected(temp_path, expected_map)
# Keep all __dir__.ini files (these are not in expected_map because they
# aren't associated with a specific test)
keep_files = directory_manifests(metadata_path)
# Copy all files in the root to the temporary location since
# these cannot be ini files
keep_files.extend(item for item in os.listdir(metadata_path) if
not os.path.isdir(os.path.join(metadata_path, item)))
for item in keep_files:
dest_dir = os.path.dirname(os.path.join(temp_path, item))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copyfile(os.path.join(metadata_path, item),
os.path.join(temp_path, item))
# Then move the old manifest files to a new location
temp_path_2 = metadata_path + str(uuid.uuid4())
os.rename(metadata_path, temp_path_2)
# Move the new files to the destination location and remove the old files
os.rename(temp_path, metadata_path)
shutil.rmtree(temp_path_2)
def write_new_expected(metadata_path, expected_map):
# Serialize the data back to a file
for tree in expected_map.itervalues():
if not tree.is_empty:
manifest_str = wptmanifest.serialize(tree.node, skip_empty_data=True)
assert manifest_str != ""
path = expected.expected_path(metadata_path, tree.test_path)
dir = os.path.split(path)[0]
if not os.path.exists(dir):
os.makedirs(dir)
with open(path, "w") as f:
f.write(manifest_str)
class ExpectedUpdater(object):
def __init__(self, test_manifests, expected_tree, id_path_map, ignore_existing=False):
self.test_manifests = test_manifests
self.expected_tree = expected_tree
self.id_path_map = id_path_map
self.ignore_existing = ignore_existing
self.run_info = None
self.action_map = {"suite_start": self.suite_start,
"test_start": self.test_start,
"test_status": self.test_status,
"test_end": self.test_end}
self.tests_visited = {}
self.test_cache = {}
def update_from_log(self, log_file):
self.run_info = None
log_reader = reader.read(log_file)
reader.each_log(log_reader, self.action_map)
def suite_start(self, data):
self.run_info = data["run_info"]
def test_id(self, id):
if type(id) in types.StringTypes:
return id
else:
return tuple(id)
def test_start(self, data):
test_id = self.test_id(data["test"])
try:
test_manifest, test = self.id_path_map[test_id]
expected_node = self.expected_tree[test_manifest][test].get_test(test_id)
except KeyError:
print "Test not found %s, skipping" % test_id
return
self.test_cache[test_id] = expected_node
if test_id not in self.tests_visited:
if self.ignore_existing:
expected_node.clear_expected()
self.tests_visited[test_id] = set()
def test_status(self, data):
test_id = self.test_id(data["test"])
test = self.test_cache.get(test_id)
if test is None:
return
test_cls = wpttest.manifest_test_cls[test.test_type]
subtest = test.get_subtest(data["subtest"])
self.tests_visited[test.id].add(data["subtest"])
result = test_cls.subtest_result_cls(
data["subtest"],
data["status"],
data.get("message"))
subtest.set_result(self.run_info, result)
def test_end(self, data):
test_id = self.test_id(data["test"])
test = self.test_cache.get(test_id)
if test is None:
return
test_cls = wpttest.manifest_test_cls[test.test_type]
if data["status"] == "SKIP":
return
result = test_cls.result_cls(
data["status"],
data.get("message"))
test.set_result(self.run_info, result)
del self.test_cache[test_id]
def create_test_tree(metadata_path, test_manifest, property_order=None,
boolean_properties=None):
expected_map = {}
id_test_map = {}
exclude_types = frozenset(["stub", "helper", "manual"])
include_types = set(manifest.item_types) - exclude_types
for test_path, tests in test_manifest.itertypes(*include_types):
expected_data = load_expected(test_manifest, metadata_path, test_path, tests,
property_order=property_order,
boolean_properties=boolean_properties)
if expected_data is None:
expected_data = create_expected(test_manifest,
test_path,
tests,
property_order=property_order,
boolean_properties=boolean_properties)
for test in tests:
id_test_map[test.id] = (test_manifest, test)
expected_map[test] = expected_data
return expected_map, id_test_map
def create_expected(test_manifest, test_path, tests, property_order=None,
boolean_properties=None):
expected = manifestupdate.ExpectedManifest(None, test_path, test_manifest.url_base,
property_order=property_order,
boolean_properties=boolean_properties)
for test in tests:
expected.append(manifestupdate.TestNode.create(test.item_type, test.id))
return expected
def load_expected(test_manifest, metadata_path, test_path, tests, property_order=None,
boolean_properties=None):
expected_manifest = manifestupdate.get_manifest(metadata_path,
test_path,
test_manifest.url_base,
property_order=property_order,
boolean_properties=boolean_properties)
if expected_manifest is None:
return
tests_by_id = {item.id: item for item in tests}
# Remove expected data for tests that no longer exist
for test in expected_manifest.iterchildren():
        if test.id not in tests_by_id:
test.remove()
# Add tests that don't have expected data
for test in tests:
if not expected_manifest.has_test(test.id):
expected_manifest.append(manifestupdate.TestNode.create(test.item_type, test.id))
return expected_manifest
| mpl-2.0 | 932,387,140,883,712,600 | 35.674286 | 100 | 0.605718 | false |
therewillbecode/ichnaea | ichnaea/data/monitor.py | 1 | 3667 | from collections import defaultdict
from datetime import timedelta
from sqlalchemy import func
from sqlalchemy.orm import load_only
from ichnaea.models import (
ApiKey,
OCIDCell,
)
from ichnaea import util
class ApiKeyLimits(object):
def __init__(self, task, session):
self.task = task
self.session = session
self.redis_client = task.redis_client
self.stats_client = task.stats_client
def __call__(self):
today = util.utcnow().strftime('%Y%m%d')
keys = self.redis_client.keys('apilimit:*:' + today)
if keys:
values = self.redis_client.mget(keys)
keys = [k.decode('utf-8').split(':')[1] for k in keys]
else:
values = []
names = {}
if keys:
query = (self.session.query(ApiKey)
.filter(ApiKey.valid_key.in_(keys))
.options(load_only('shortname')))
for api_key in query.all():
names[api_key.valid_key] = api_key.name
result = {}
for k, v in zip(keys, values):
name = names.get(k, k)
value = int(v)
result[name] = value
self.stats_client.gauge(
'api.limit', value, tags=['key:' + name])
return result
class ApiUsers(object):
def __init__(self, task):
self.task = task
self.redis_client = task.redis_client
self.stats_client = task.stats_client
def __call__(self):
days = {}
today = util.utcnow().date()
for i in range(0, 7):
day = today - timedelta(days=i)
days[i] = day.strftime('%Y-%m-%d')
metrics = defaultdict(list)
result = {}
for key in self.redis_client.scan_iter(
match='apiuser:*', count=100):
_, api_type, api_name, day = key.decode('ascii').split(':')
if day not in days.values():
# delete older entries
self.redis_client.delete(key)
continue
if day == days[0]:
metrics[(api_type, api_name, '1d')].append(key)
metrics[(api_type, api_name, '7d')].append(key)
for parts, keys in metrics.items():
api_type, api_name, interval = parts
value = self.redis_client.pfcount(*keys)
self.stats_client.gauge(
'%s.user' % api_type, value,
tags=['key:%s' % api_name, 'interval:%s' % interval])
result['%s:%s:%s' % parts] = value
return result
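        # Note (added for clarity, not in the original file): the apiuser:* keys
        # are Redis HyperLogLogs, so pfcount() over a day's worth of keys gives
        # an approximate count of distinct users per API name and interval.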
class OcidImport(object):
def __init__(self, task, session):
self.task = task
self.session = session
self.stats_client = task.stats_client
def __call__(self):
result = -1
now = util.utcnow()
query = self.session.query(func.max(OCIDCell.created))
max_created = query.first()[0]
if max_created:
# diff between now and the value, in milliseconds
diff = now - max_created
result = (diff.days * 86400 + diff.seconds) * 1000
self.stats_client.gauge('table', result, tags=['table:ocid_cell_age'])
return result
class QueueSize(object):
def __init__(self, task):
self.task = task
self.redis_client = task.redis_client
self.stats_client = task.stats_client
def __call__(self):
result = {}
for name in self.task.app.all_queues:
result[name] = value = self.redis_client.llen(name)
self.stats_client.gauge('queue', value, tags=['queue:' + name])
return result
| apache-2.0 | -2,295,239,486,701,998,600 | 29.057377 | 78 | 0.533951 | false |
audunv/andp | python/andp/view/web/widgets.py | 1 | 10766 | # -*- coding: utf-8; -*-
# Copyright (C) 2009 Østfold University College
#
# This file is part of ANDP.
#
# ANDP is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
"""
This module contains generic widgets for use on web pages.
A widget typically represents an input, and includes functionality for
parsing and validating data.
"""
import time, os, datetime
from mod_python import apache
import andp.view.web
class Widget(object):
"""
Abstract base class for all widgets.
"""
def __init__(self, parent, name, value = None):
self.parent = parent
self.name = name
self.value = value
def ParseInput(self, form):
"""
Parses form data, and returns tuple (status, data, message)
status: True if input data were valid, False otherwise
data: Parsed input data
message: Optional error message to be displayed to end user.
Empty string if everything's OK
"""
return (True, None, "")
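    # Illustrative usage sketch (not part of the original module): a request
    # handler would typically consume a widget roughly like this:
    #
    #   ok, value, message = widget.ParseInput(form)
    #   if not ok:
    #       errors.append(message)   # 'errors' is a hypothetical list
    #
    # The subclasses below (TimeWidget, DateWidget, SelectWidget, ...) override
    # ParseInput to return their parsed value in the 'data' slot of the tuple.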
class TimeWidget(Widget):
"""
    A widget that lets the user select a time.
"""
def __init__(self, parent, name, value = None):
super(self.__class__, self).__init__(parent, name, value)
if not self.value:
self.value = time.localtime()[3:5]
def GetHTML(self, form = None, ampm = False):
if form:
defHour = int(form.get(self.name + "_hour", ""))
defMin = int(form.get(self.name + "_min", ""))
try:
defAMPM = form[self.name + "_ampm"].value
except KeyError:
defAMPM = None
else:
if ampm:
if self.value[0] < 12:
defHour, defMin = self.value
defAMPM = "am"
else:
defHour = self.value[0] - 12
defMin = self.value[1]
defAMPM = "pm"
else:
defHour, defMin = self.value
defAMPM = False
html = '<select name="%s_hour" id="%s_hour">' % (self.name, self.name)
if ampm:
upperHour = 12
else:
upperHour = 24
for hour in xrange(0, upperHour):
if ampm and hour == 0:
showHour = 12
else:
showHour = hour
if hour == defHour:
html += '<option value="%02i" selected="1">%02i</option>' % (hour, showHour)
else:
html += '<option value="%02i">%02i</option>' % (hour, showHour)
html += '</select>'
html += ':'
html += '<select name="%s_min" id="%s_min">' % (self.name, self.name)
for mint in xrange(0, 60, 5):
# In case we get a value that isn't a multiple of five (shouldn't happen)
if mint == (defMin / 5) * 5:
html += '<option value="%02i" selected="1">%02i</option>' % (mint, mint)
else:
html += '<option value="%02i">%02i</option>' % (mint, mint)
html += '</select>\n'
if ampm:
html += '<select name="%s_ampm" id="%s_ampm">' % (self.name, self.name)
for ampmTxt in ["am", "pm"]:
if ampmTxt == defAMPM:
html += '<option value="%s" selected="1">%s</option>' % (ampmTxt, ampmTxt.upper())
else:
html += '<option value="%s">%s</option>' % (ampmTxt, ampmTxt.upper())
html += '</select>\n'
return html
def ParseInput(self, form):
try:
hourS = form[self.name + "_hour"].value
mintS = form[self.name + "_min"].value
except KeyError:
return (False, None, "You must specify a time")
try:
ampm = form[self.name + "_ampm"].value
except KeyError:
ampm = None
try:
hour = int(hourS)
mint = int(mintS)
except ValueError:
return (False, None, "Invalid time")
if ampm == "pm":
hour += 12
if hour < 0 or hour > 23 or mint < 0 or mint > 59:
return (False, None, "Invalid time")
return (True, (hour, mint, 0), "")
class DateWidget(Widget):
"""
    Allows the user to select a date.
"""
monthNames = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
shortMonthNames = [name[:3] for name in monthNames]
def __init__(self, parent, name, value = None):
super(self.__class__, self).__init__(parent, name, value)
if not self.value:
self.value = time.localtime()[:3]
def GetHTML(self, form = None):
if form:
defYear = int(form.get(self.name + "_year", ""))
defMonth = int(form.get(self.name + "_month", ""))
defDay = int(form.get(self.name + "_day", ""))
else:
defYear, defMonth, defDay = self.value
html = '<select name="%s_day" id="%s_day">\n' % (self.name, self.name)
for day in xrange(1, 32):
if day == defDay:
html += ' <option value="%i" selected="1">%i</option>\n' % (day, day)
else:
html += ' <option value="%i">%i</option>\n' % (day, day)
html += '</select>\n'
html += '<select name="%s_month" id="%s_month">\n' % (self.name, self.name)
for i in xrange(len(self.monthNames)):
monthName = self.monthNames[i]
if i + 1 == defMonth:
html += ' <option value="%i" selected="1">%s</option>\n' % (i + 1, monthName)
else:
html += ' <option value="%i">%s</option>\n' % (i + 1, monthName)
html += '</select>\n'
firstYear = time.gmtime(time.time() - 24 * 3600)[0]
html += '<select name="%s_year" id="%s_year">\n' % (self.name, self.name)
for year in xrange(firstYear, firstYear + 2):
if year == defYear:
html += ' <option value="%i" selected="1">%04i</option>\n' % (year, year)
else:
html += ' <option value="%i">%04i</option>\n' % (year, year)
html += '</select>\n'
return html
def ParseInput(self, form):
try:
dayS = form[self.name + "_day"].value
monthS = form[self.name + "_month"].value
yearS = form[self.name + "_year"].value
except KeyError:
return (False, None, "You must specify a date")
try:
day = int(dayS)
month = int(monthS)
year = int(yearS)
except ValueError:
return (False, None, "Invalid date")
if day < 1 or day > 31 or month < 1 or month > 12:
return (False, None, "Invalid date")
return (True, (year, month, day), "")
class SelectWidget(Widget):
def __init__(self, parent, name, value = None, options = []):
super(self.__class__, self).__init__(parent, name, value)
self.options = options
def GetHTML(self, form = None):
if form:
selected = form.get(self.name, None)
else:
selected = self.value
html = '<select name="%s" id="%s">\n' % (self.name, self.name)
for option, label in self.options:
if option == selected:
html += ' <option value="%s" selected="1">%s</option>\n' % (option, label)
else:
html += ' <option value="%s">%s</option>\n' % (option, label)
html += '</select>\n'
return html
def ParseInput(self, form):
return (True, form[self.name].value, "")
class RadioWidget(Widget):
def __init__(self, parent, name, value = None, options = []):
super(self.__class__, self).__init__(parent, name, value)
self.options = options
self.value = value
def GetHTML(self, form = None):
if form:
selected = form.get(self.name, "")
else:
if self.value == None:
selected = self.options[0][0]
else:
selected = self.value
inputs = []
for option, label in self.options:
if option == selected:
inputs.append('<input type="radio" name="%s" value="%s" checked="1" />%s\n' % (self.name, option, label))
else:
inputs.append('<input type="radio" name="%s" value="%s" />%s\n' % (self.name, option, label))
return "\n<br/>".join(inputs)
def ParseInput(self, form):
return (True, form[self.name].value, "")
class TextWidget(Widget):
"""
A simple one-line or multi-line textbox widget
"""
def __init__(self, req, name, value = "", required = False, errMsg = "Field is required", maxLen = 64, cols = 20, rows = 1):
super(TextWidget, self).__init__(req, name, value = value)
self.required = required
self.errMsg = errMsg
self.maxLen = maxLen
self.cols = cols
self.rows = rows
def GetHTML(self, form = None):
EH = andp.view.web.EH
if form:
try:
value = form[self.name].value
except KeyError:
value = self.value
else:
value = self.value
if self.rows > 1:
return '<textarea name="%s" cols="%i" rows="%i">%s</textarea>' % (self.name, self.cols, self.rows, EH(value))
else:
return '<input type="text" name="%s" value="%s" size="%i" />' % (self.name, EH(value), self.cols)
def ParseInput(self, form):
try:
value = form[self.name].value
except KeyError:
value = ""
if self.required and not value:
return (False, None, self.errMsg)
if len(value) > self.maxLen:
return (False, None, 'Too long (max %i characters)' % self.maxLen)
return (True, value, "")
| gpl-2.0 | -5,732,348,103,663,039,000 | 31.327327 | 139 | 0.511287 | false |
s0enke/boto | tests/integration/cloudformation/test_cert_verification.py | 126 | 1588 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.cloudformation
class CloudFormationCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
cloudformation = True
regions = boto.cloudformation.regions()
def sample_service_call(self, conn):
conn.describe_stacks()
| mit | -2,095,200,390,268,686,000 | 39.717949 | 89 | 0.771411 | false |
GDGND/evm | allauth/account/auth_backends.py | 57 | 2101 | from django.contrib.auth.backends import ModelBackend
from ..utils import get_user_model
from .utils import filter_users_by_email
from .app_settings import AuthenticationMethod
from . import app_settings
class AuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
ret = None
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
ret = self._authenticate_by_email(**credentials)
elif app_settings.AUTHENTICATION_METHOD \
== AuthenticationMethod.USERNAME_EMAIL:
ret = self._authenticate_by_email(**credentials)
if not ret:
ret = self._authenticate_by_username(**credentials)
else:
ret = self._authenticate_by_username(**credentials)
return ret
def _authenticate_by_username(self, **credentials):
username_field = app_settings.USER_MODEL_USERNAME_FIELD
username = credentials.get('username')
password = credentials.get('password')
User = get_user_model()
if not username_field or username is None or password is None:
return None
try:
# Username query is case insensitive
query = {username_field+'__iexact': username}
user = User.objects.get(**query)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
def _authenticate_by_email(self, **credentials):
# Even though allauth will pass along `email`, other apps may
# not respect this setting. For example, when using
# django-tastypie basic authentication, the login is always
        # passed as `username`. So let's play nice with other apps
# and use username as fallback
User = get_user_model()
email = credentials.get('email', credentials.get('username'))
if email:
for user in filter_users_by_email(email):
if user.check_password(credentials["password"]):
return user
return None
| mit | -1,464,291,154,103,092,700 | 36.517857 | 76 | 0.630176 | false |
SebastianLloret/CSCI-1310 | Assignments/Assignment8_Lloret/Assignment8_Lloret.py | 1 | 1401 | '''
Name: Sebastian Lloret
Recitation TA: Brennan Mcconnell
Assignment #: 8
'''
# Used to properly break the file into rows
import csv
def CreateDictionary(fileName):
slangDictionary = {}
    # The 'with' statement just ensures the resource is cleaned up even if
    # exceptions are thrown. I had to use "rU" for universal newline support
    # since textToEnglish is formatted with \r and not \n
with open(fileName, "rU") as f:
reader = csv.reader(f)
for row in reader:
            # Row is an n-element list where n is the number of columns in each row
# ex. Row = [slang, translation]
slangDictionary[row[0]] = row[1]
return slangDictionary
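# Illustrative example (not part of the assignment files): if textToEnglish.csv
# contained a hypothetical row "lol,laugh out loud", CreateDictionary would
# return a dict containing {"lol": "laugh out loud"}.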
if __name__ == "__main__":
# Call the function above
slangDictionary = CreateDictionary("textToEnglish.csv")
playing = True
# Grab user input(s)
while playing == True:
userInput = raw_input("Enter text abbreviations separated by spaces, or q to quit.\n")
if userInput == "q":
raise SystemExit
# Split the userInput at every space
arrayInputs = userInput.split()
# For every element in the array of inputs, check if we have the translation
for element in arrayInputs:
if element in slangDictionary:
print slangDictionary[element]
else:
print "NF"
| gpl-3.0 | 4,026,933,840,228,776,400 | 34.868421 | 94 | 0.613847 | false |
ewindisch/nova | nova/api/openstack/compute/contrib/console_output.py | 5 | 3746 | # Copyright 2011 OpenStack Foundation
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'console_output')
class ConsoleOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ConsoleOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('os-getConsoleOutput')
def get_console_output(self, req, id, body):
"""Get text console output."""
context = req.environ['nova.context']
authorize(context)
try:
instance = self.compute_api.get(context, id)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Instance not found'))
try:
length = body['os-getConsoleOutput'].get('length')
except (TypeError, KeyError):
raise webob.exc.HTTPBadRequest(_('os-getConsoleOutput malformed '
'or missing from request body'))
if length is not None:
try:
                # NOTE(maurosr): cast length into a string before casting into
                # an integer to avoid things like int(2.5), which is 2, instead
                # of raising ValueError like it would when we try int("2.5").
                # This can be removed once we have api validation landed.
int(str(length))
except ValueError:
raise webob.exc.HTTPBadRequest(_('Length in request body must '
'be an integer value'))
try:
output = self.compute_api.get_console_output(context,
instance,
length)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Unable to get console'))
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to get console log, functionality not implemented")
raise webob.exc.HTTPNotImplemented(explanation=msg)
# XML output is not correctly escaped, so remove invalid characters
remove_re = re.compile('[\x00-\x08\x0B-\x1F]')
output = remove_re.sub('', output)
return {'output': output}
class Console_output(extensions.ExtensionDescriptor):
"""Console log output support, with tailing ability."""
name = "ConsoleOutput"
alias = "os-console-output"
namespace = ("http://docs.openstack.org/compute/ext/"
"os-console-output/api/v2")
updated = "2011-12-08T00:00:00+00:00"
def get_controller_extensions(self):
controller = ConsoleOutputController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 | 2,368,292,583,006,758,400 | 38.851064 | 79 | 0.626001 | false |
eahneahn/free | djangoproject/core/urls/__init__.py | 1 | 4601 | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView, RedirectView
from django.conf import settings
#from django.views.generic.simple import redirect_to, direct_to_template
from django.shortcuts import render,redirect
urlpatterns = patterns('core.views.main_views',
url(r'^$', 'home'),
url(r'^home/$', RedirectView.as_view(url='/', permanent=True)),
url(r'^toggle_layout/$', 'toggle_layout'),
url(r'^stats/$', 'stats'),
url(r'^admail/$', 'admail'),
url(r'^mailtest/$', 'mailtest'),
url(r'^about/$', redirect, {'url': 'http://blog.freedomsponsors.org/about/'}),
url(r'^faq/$', redirect, {'url': 'http://blog.freedomsponsors.org/faq/'}),
url(r'^dev/$', redirect, {'url': 'http://blog.freedomsponsors.org/developers/'}),
url(r'^login/$', 'login'),
url(r'^logout/$', 'logout'),
url(r'^jslic$', render, {'template': 'core/jslic.html'}),
)
urlpatterns += patterns('core.views.issue_views',
url(r'^myissues/$', 'myissues'),
url(r'^issue/$', 'listIssues'),
url(r'^issue/rss$', 'listIssuesFeed'),
url(r'^issue/sponsor/submit$', 'sponsorIssue'),
url(r'^issue/sponsor$', 'addIssueForm'),
url(r'^issue/add/submit$', 'addIssue'),
url(r'^issue/kickstart/submit$', 'kickstartIssue'),
url(r'^issue/add/$', 'addIssueForm'),
url(r'^issue/edit/submit$', 'editIssue'),
url(r'^offer/(?P<offer_id>\d+)/pay$', 'payOfferForm'),
url(r'^offer/pay/submit$', 'payOffer'),
url(r'^issue/(?P<issue_id>\d+)/$', 'viewIssue'),
url(r'^issue/(?P<issue_id>\d+)/.*$', 'viewIssue'),
# url(r'^offer/(?P<offer_id>\d+)/$', 'viewOffer'),
# url(r'^offer/(?P<offer_id>\d+)/.*$', 'viewOffer'),
url(r'^offer/revoke/submit$', 'revokeOffer'),
url(r'^offer/edit/submit$', 'editOffer'),
url(r'^solution/add/submit$', 'addSolution'),
url(r'^solution/abort/submit$', 'abortSolution'),
url(r'^solution/resolve/submit$', 'resolveSolution'),
)
urlpatterns += patterns('',
url(r'^project/$', RedirectView.as_view(url='/project/', permanent=True)),
url(r'^project/(?P<project_id>\d+)/$', RedirectView.as_view(url='/project/%(project_id)s/', permanent=True)),
url(r'^project/(?P<project_id>\d+)/edit$', RedirectView.as_view(url='/project/%(project_id)s/edit', permanent=True)),
)
urlpatterns += patterns('core.views.comment_views',
url(r'^issue/comment/add/submit$', 'addIssueComment'),
url(r'^issue/comment/edit/submit$', 'editIssueComment'),
url(r'^issue/comment/(?P<comment_id>\d+)/history$', 'viewIssueCommentHistory'),
url(r'^offer/comment/(?P<comment_id>\d+)/history$', 'viewOfferCommentHistory'),
)
urlpatterns += patterns('', # TODO: how to use reverse_lazy here?
url(r'^watch/issue/(?P<issue_id>\d+)$', RedirectView.as_view(url='/issue/%(issue_id)s/watch', permanent=True)),
url(r'^unwatch/issue/(?P<issue_id>\d+)$', RedirectView.as_view(url='/issue/%(issue_id)s/unwatch', permanent=True)),
url(r'^watch/offer/(?P<offer_id>\d+)$', RedirectView.as_view(url='/offer/%(offer_id)s/watch', permanent=True)),
url(r'^unwatch/offer/(?P<offer_id>\d+)$', RedirectView.as_view(url='/offer/%(offer_id)s/unwatch', permanent=True)),
)
urlpatterns += patterns('core.views.paypal_views',
url(r'^paypal/cancel$', 'paypalCancel'),
url(r'^paypal/return$', 'paypalReturn'),
url(r'^paypal/'+settings.PAYPAL_IPNNOTIFY_URL_TOKEN+'$', 'paypalIPN'),
)
urlpatterns += patterns('core.views.bitcoin_views',
url(r'^bitcoin/'+settings.BITCOIN_IPNNOTIFY_URL_TOKEN+'$', 'bitcoinIPN'),
)
urlpatterns += patterns('',
url(r'^user/$', RedirectView.as_view(url='/user/', permanent=True)),
url(r'^user/(?P<user_id>\d+)/$', RedirectView.as_view(url='/user/%(user_id)s/', permanent=True)),
url(r'^user/(?P<user_id>\d+)/(?P<user_slug>.*)$', RedirectView.as_view(url='/user/%(user_id)s/%(user_slug)s', permanent=True)),
url(r'^user/edit$', RedirectView.as_view(url='/user/edit', permanent=True)),
)
urlpatterns += patterns('core.views.json_views',
url(r'^json/project$', 'project'),
url(r'^json/by_issue_url$', 'by_issue_url'),
url(r'^json/get_offers$', 'get_offers'),
url(r'^json/list_issue_cards', 'list_issue_cards'),
url(r'^json/add_tag', 'add_tag'),
url(r'^json/remove_tag', 'remove_tag'),
url(r'^json/latest_activity', 'latest_activity'),
url(r'^json/toggle_watch', 'toggle_watch'),
)
# urlpatterns += patterns('core.jiraviews',
# url(r'^issue/sponsor_jira$', 'sponsorJiraForm'),
# )
urlpatterns += patterns('',
url(r'^feedback$', RedirectView.as_view(url='/feedback', permanent=True)),
)
| agpl-3.0 | -2,636,046,931,927,558,000 | 45.474747 | 131 | 0.63747 | false |
godiard/speak | aiml/DefaultSubs.py | 9 | 3590 | """This file contains the default (English) substitutions for the
PyAIML kernel. These substitutions may be overridden by using the
Kernel.loadSubs(filename) method. The filename specified should refer
to a Windows-style INI file with the following format:
# lines that start with '#' are comments
# The 'gender' section contains the substitutions performed by the
# <gender> AIML tag, which swaps masculine and feminine pronouns.
[gender]
he = she
she = he
# and so on...
# The 'person' section contains the substitutions performed by the
# <person> AIML tag, which swaps 1st and 2nd person pronouns.
[person]
I = you
you = I
# and so on...
# The 'person2' section contains the substitutions performed by
# the <person2> AIML tag, which swaps 1st and 3rd person pronouns.
[person2]
I = he
he = I
# and so on...
# The 'normal' section contains substitutions run on every input
# string passed into Kernel.respond(). It's mainly used to
# correct common misspellings, and to convert contractions
# ("WHAT'S") into a format that will match an AIML pattern ("WHAT
# IS").
[normal]
what's = what is
"""
defaultGender = {
# masculine -> feminine
"he": "she",
"him": "her",
"his": "her",
"himself": "herself",
# feminine -> masculine
"she": "he",
"her": "him",
"hers": "his",
"herself": "himself",
}
defaultPerson = {
# 1st->3rd (masculine)
"I": "he",
"me": "him",
"my": "his",
"mine": "his",
"myself": "himself",
# 3rd->1st (masculine)
"he":"I",
"him":"me",
"his":"my",
"himself":"myself",
# 3rd->1st (feminine)
"she":"I",
"her":"me",
"hers":"mine",
"herself":"myself",
}
defaultPerson2 = {
# 1st -> 2nd
"I": "you",
"me": "you",
"my": "your",
"mine": "yours",
"myself": "yourself",
# 2nd -> 1st
"you": "me",
"your": "my",
"yours": "mine",
"yourself": "myself",
}
# TODO: this list is far from complete
defaultNormal = {
"wanna": "want to",
"gonna": "going to",
"I'm": "I am",
"I'd": "I would",
"I'll": "I will",
"I've": "I have",
"you'd": "you would",
"you're": "you are",
"you've": "you have",
"you'll": "you will",
"he's": "he is",
"he'd": "he would",
"he'll": "he will",
"she's": "she is",
"she'd": "she would",
"she'll": "she will",
"we're": "we are",
"we'd": "we would",
"we'll": "we will",
"we've": "we have",
"they're": "they are",
"they'd": "they would",
"they'll": "they will",
"they've": "they have",
"y'all": "you all",
"can't": "can not",
"cannot": "can not",
"couldn't": "could not",
"wouldn't": "would not",
"shouldn't": "should not",
"isn't": "is not",
"ain't": "is not",
"don't": "do not",
"aren't": "are not",
"won't": "will not",
"weren't": "were not",
"wasn't": "was not",
"didn't": "did not",
"hasn't": "has not",
"hadn't": "had not",
"haven't": "have not",
"where's": "where is",
"where'd": "where did",
"where'll": "where will",
"who's": "who is",
"who'd": "who did",
"who'll": "who will",
"what's": "what is",
"what'd": "what did",
"what'll": "what will",
"when's": "when is",
"when'd": "when did",
"when'll": "when will",
"why's": "why is",
"why'd": "why did",
"why'll": "why will",
"it's": "it is",
"it'd": "it would",
"it'll": "it will",
} | gpl-3.0 | 7,864,459,795,949,373,000 | 22.019231 | 70 | 0.521448 | false |
sanyaade-g2g-repos/key-mon | src/keymon/shaped_window.py | 15 | 3172 | #!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a shaped window to show mouse events.
Thanks to mathias.gumz for the original code.
"""
import gobject
import gtk
import lazy_pixbuf_creator
class ShapedWindow(gtk.Window):
"""Create a window shaped as fname."""
def __init__(self, fname, scale=1.0, timeout=0.2):
gtk.Window.__init__(self)
self.connect('size-allocate', self._on_size_allocate)
self.set_decorated(False)
self.set_keep_above(True)
self.set_accept_focus(False)
self.scale = scale
self.shown = False
self.timeout = timeout
self.timeout_timer = None
self.name_fnames = {
'mouse' : [fname],
}
self.pixbufs = lazy_pixbuf_creator.LazyPixbufCreator(self.name_fnames,
self.scale)
self.pixbuf = self.pixbufs.get('mouse')
self.resize(self.pixbuf.get_width(), self.pixbuf.get_height())
# a pixmap widget to contain the pixmap
self.image = gtk.Image()
bitmap, self.mask = self.pixbuf.render_pixmap_and_mask()
self.image.set_from_pixmap(bitmap, self.mask)
self.image.show()
self.add(self.image)
def _on_size_allocate(self, win, unused_allocation):
"""Called when first allocated."""
# Set the window shape
win.shape_combine_mask(self.mask, 0, 0)
win.set_property('skip-taskbar-hint', True)
if not win.is_composited():
print 'Unable to fade the window'
else:
win.set_opacity(0.5)
def center_on_cursor(self, x=None, y=None):
if x is None or y is None:
root = gtk.gdk.screen_get_default().get_root_window()
x, y, _ = root.get_pointer()
w, h = self.get_size()
new_x, new_y = x - w/2, y - h/2
pos = self.get_position()
if pos[0] != new_x or pos[1] != new_y:
self.move(new_x, new_y)
self.show()
def show(self):
"""Show this mouse indicator and ignore awaiting fade away request."""
if self.timeout_timer and self.shown:
# There is a fade away request, ignore it
gobject.source_remove(self.timeout_timer)
self.timeout_timer = None
# This method only is called when mouse is pressed, so there will be a
# release and fade_away call, no need to set up another timer.
super(ShapedWindow, self).show()
def maybe_show(self):
if self.shown or not self.timeout_timer:
return
self.shown = True
self.show()
def fade_away(self):
"""Make the window fade in a little bit."""
# TODO this isn't doing any fading out
self.shown = False
self.timeout_timer = gobject.timeout_add(int(self.timeout * 1000), self.hide)
| apache-2.0 | -5,034,165,190,098,059,000 | 32.744681 | 81 | 0.659836 | false |
4eek/edx-platform | lms/djangoapps/certificates/views/support.py | 52 | 5649 | """
Certificate end-points used by the student support UI.
See lms/djangoapps/support for more details.
"""
import logging
from functools import wraps
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseServerError
)
from django.views.decorators.http import require_GET, require_POST
from django.db.models import Q
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from student.models import User, CourseEnrollment
from courseware.access import has_access
from util.json_request import JsonResponse
from certificates import api
log = logging.getLogger(__name__)
def require_certificate_permission(func):
"""
View decorator that requires permission to view and regenerate certificates.
"""
@wraps(func)
def inner(request, *args, **kwargs): # pylint:disable=missing-docstring
if has_access(request.user, "certificates", "global"):
return func(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return inner
@require_GET
@require_certificate_permission
def search_by_user(request):
"""
Search for certificates for a particular user.
Supports search by either username or email address.
Arguments:
request (HttpRequest): The request object.
Returns:
JsonResponse
Example Usage:
        GET /certificates/search?query=bob@example.com
Response: 200 OK
Content-Type: application/json
[
{
"username": "bob",
"course_key": "edX/DemoX/Demo_Course",
"type": "verified",
"status": "downloadable",
"download_url": "http://www.example.com/cert.pdf",
"grade": "0.98",
"created": 2015-07-31T00:00:00Z,
"modified": 2015-07-31T00:00:00Z
}
]
"""
query = request.GET.get("query")
if not query:
return JsonResponse([])
try:
user = User.objects.get(Q(email=query) | Q(username=query))
except User.DoesNotExist:
return JsonResponse([])
certificates = api.get_certificates_for_user(user.username)
for cert in certificates:
cert["course_key"] = unicode(cert["course_key"])
cert["created"] = cert["created"].isoformat()
cert["modified"] = cert["modified"].isoformat()
return JsonResponse(certificates)
def _validate_regen_post_params(params):
"""
Validate request POST parameters to the regenerate certificates end-point.
Arguments:
params (QueryDict): Request parameters.
Returns: tuple of (dict, HttpResponse)
"""
# Validate the username
try:
username = params.get("username")
user = User.objects.get(username=username)
except User.DoesNotExist:
msg = _("User {username} does not exist").format(username=username)
return None, HttpResponseBadRequest(msg)
# Validate the course key
try:
course_key = CourseKey.from_string(params.get("course_key"))
except InvalidKeyError:
msg = _("{course_key} is not a valid course key").format(course_key=params.get("course_key"))
return None, HttpResponseBadRequest(msg)
return {"user": user, "course_key": course_key}, None
@require_POST
@require_certificate_permission
def regenerate_certificate_for_user(request):
"""
Regenerate certificates for a user.
This is meant to be used by support staff through the UI in lms/djangoapps/support
Arguments:
request (HttpRequest): The request object
Returns:
HttpResponse
Example Usage:
POST /certificates/regenerate
* username: "bob"
* course_key: "edX/DemoX/Demo_Course"
Response: 200 OK
"""
# Check the POST parameters, returning a 400 response if they're not valid.
params, response = _validate_regen_post_params(request.POST)
if response is not None:
return response
# Check that the course exists
course = modulestore().get_course(params["course_key"])
if course is None:
msg = _("The course {course_key} does not exist").format(course_key=params["course_key"])
return HttpResponseBadRequest(msg)
# Check that the user is enrolled in the course
if not CourseEnrollment.is_enrolled(params["user"], params["course_key"]):
msg = _("User {username} is not enrolled in the course {course_key}").format(
username=params["user"].username,
course_key=params["course_key"]
)
return HttpResponseBadRequest(msg)
# Attempt to regenerate certificates
try:
api.regenerate_user_certificates(params["user"], params["course_key"], course=course)
except: # pylint: disable=bare-except
# We are pessimistic about the kinds of errors that might get thrown by the
# certificates API. This may be overkill, but we're logging everything so we can
# track down unexpected errors.
log.exception(
"Could not regenerate certificates for user %s in course %s",
params["user"].id,
params["course_key"]
)
return HttpResponseServerError(_("An unexpected error occurred while regenerating certificates."))
log.info(
"Started regenerating certificates for user %s in course %s from the support page.",
params["user"].id, params["course_key"]
)
return HttpResponse(200)
| agpl-3.0 | 4,506,745,651,908,244,500 | 29.535135 | 106 | 0.653567 | false |
realms-team/solmanager | libs/smartmeshsdk-REL-1.3.0.1/libs/VManagerSDK/vmanager/configuration.py | 3 | 7415 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import base64
import urllib3
try:
import httplib
except ImportError:
# for python3
import http.client as httplib
import sys
import logging
from six import iteritems
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
@singleton
class Configuration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
def __init__(self):
"""
Constructor
"""
# Default Base url
self.host = "https://localhost/manager/v1"
# Default api client
self.api_client = None
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("vmanager")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
@property
def logger_file(self):
"""
Gets the logger_file.
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""
Sets the logger_file.
If the logger_file is None, then add stream handler and remove file handler.
Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
            # If a log file is set,
            # then add a file handler and remove the stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
            # If no log file is set,
            # then add a stream handler and remove the file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
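    # Illustrative usage (not part of the generated SDK): Configuration is a
    # singleton, so assigning logger_file switches every SDK logger over to a
    # file handler, e.g.:
    #
    #   config = Configuration()
    #   config.logger_file = "/tmp/vmanager-sdk.log"  # hypothetical path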
@property
def debug(self):
"""
Gets the debug status.
"""
return self.__debug
@debug.setter
def debug(self, value):
"""
Sets the debug status.
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""
Gets the logger_format.
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""
Sets the logger_format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""
Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.api_key.get(identifier) and self.api_key_prefix.get(identifier):
return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier]
elif self.api_key.get(identifier):
return self.api_key[identifier]
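    # Illustrative example (not part of the generated SDK): with hypothetical
    # values api_key['apiKey'] = 'abc123' and api_key_prefix['apiKey'] = 'Token',
    # get_api_key_with_prefix('apiKey') returns 'Token abc123'; with no prefix
    # configured it returns just 'abc123'.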
def get_basic_auth_token(self):
"""
Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(basic_auth=self.username + ':' + self.password)\
.get('authorization')
def auth_settings(self):
"""
Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
'dust_basic':
{
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
},
}
def to_debug_report(self):
"""
Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 1.0.0\n"\
"SDK Package Version: 0.2".\
format(env=sys.platform, pyversion=sys.version)
| bsd-3-clause | -5,936,042,476,031,169,000 | 30.155462 | 97 | 0.586514 | false |
111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_Cisco_IOS_XE_bgp_oper.py | 1 | 48604 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'BgpAfiSafiEnum' : _MetaInfoEnum('BgpAfiSafiEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'ipv4-mdt':'ipv4_mdt',
'ipv4-multicast':'ipv4_multicast',
'ipv4-unicast':'ipv4_unicast',
'ipv4-mvpn':'ipv4_mvpn',
'ipv4-flowspec':'ipv4_flowspec',
'ipv6-multicast':'ipv6_multicast',
'ipv6-unicast':'ipv6_unicast',
'ipv6-mvpn':'ipv6_mvpn',
'ipv6-flowspec':'ipv6_flowspec',
'l2vpn-vpls':'l2vpn_vpls',
'l2vpn-e-vpn':'l2vpn_e_vpn',
'nsap-unicast':'nsap_unicast',
'rtfilter-unicast':'rtfilter_unicast',
'vpnv4-multicast':'vpnv4_multicast',
'vpnv4-unicast':'vpnv4_unicast',
'vpnv6-unicast':'vpnv6_unicast',
'vpnv6-multicast':'vpnv6_multicast',
'vpnv4-flowspec':'vpnv4_flowspec',
'vpnv6-flowspec':'vpnv6_flowspec',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpOriginCodeEnum' : _MetaInfoEnum('BgpOriginCodeEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'origin-igp':'origin_igp',
'origin-egp':'origin_egp',
'origin-incomplete':'origin_incomplete',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpRpkiStatusEnum' : _MetaInfoEnum('BgpRpkiStatusEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'rpki-valid':'rpki_valid',
'rpki-invalid':'rpki_invalid',
'rpki-not-found':'rpki_not_found',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpModeEnum' : _MetaInfoEnum('BgpModeEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'active':'active',
'passive':'passive',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpRouteOptionEnum' : _MetaInfoEnum('BgpRouteOptionEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'bgp-all-routes':'bgp_all_routes',
'bgp-cidr-only-routes':'bgp_cidr_only_routes',
'bgp-dampened-routes':'bgp_dampened_routes',
'bgp-rib-fail-routes':'bgp_rib_fail_routes',
'bgp-injected-routes':'bgp_injected_routes',
'bgp-pending-routes':'bgp_pending_routes',
'bgp-inconsistent-routes':'bgp_inconsistent_routes',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpFsmStateEnum' : _MetaInfoEnum('BgpFsmStateEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'idle':'idle',
'connect':'connect',
'active':'active',
'opensent':'opensent',
'openconfirm':'openconfirm',
'established':'established',
'nonnegotiated':'nonnegotiated',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpLinkEnum' : _MetaInfoEnum('BgpLinkEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'internal':'internal',
'external':'external',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'TcpFsmStateEnum' : _MetaInfoEnum('TcpFsmStateEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'closed':'closed',
'listen':'listen',
'synsent':'synsent',
'synrcvd':'synrcvd',
'established':'established',
'finwait1':'finwait1',
'finwait2':'finwait2',
'closewait':'closewait',
'lastack':'lastack',
'closing':'closing',
'timewait':'timewait',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers',
False,
[
_MetaInfoClassMember('hold-time', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Hold time
''',
'hold_time',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('keepalive-interval', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' keepalive interval
''',
'keepalive_interval',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'negotiated-keepalive-timers',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent',
False,
[
_MetaInfoClassMember('keepalives', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' KEEPALIVE messages
''',
'keepalives',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('notifications', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' NOTIFICATION messages
''',
'notifications',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('opens', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' OPEN messages
''',
'opens',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('route-refreshes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route refresh messages
''',
'route_refreshes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('updates', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' UPDATE messages
''',
'updates',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'sent',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received',
False,
[
_MetaInfoClassMember('keepalives', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' KEEPALIVE messages
''',
'keepalives',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('notifications', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' NOTIFICATION messages
''',
'notifications',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('opens', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' OPEN messages
''',
'opens',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('route-refreshes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route refresh messages
''',
'route_refreshes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('updates', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' UPDATE messages
''',
'updates',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'received',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.BgpNeighborCounters' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.BgpNeighborCounters',
False,
[
_MetaInfoClassMember('inq-depth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input Q depth
''',
'inq_depth',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('outq-depth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output Q depth
''',
'outq_depth',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('received', REFERENCE_CLASS, 'Received' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received',
[], [],
''' ''',
'received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('sent', REFERENCE_CLASS, 'Sent' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent',
[], [],
''' ''',
'sent',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-neighbor-counters',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.Connection' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.Connection',
False,
[
_MetaInfoClassMember('last-reset', ATTRIBUTE, 'str' , None, None,
[], [],
''' since the peering session was last reset
''',
'last_reset',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'BgpModeEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpModeEnum',
[], [],
''' ''',
'mode',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('reset-reason', ATTRIBUTE, 'str' , None, None,
[], [],
''' The reason for the last reset
''',
'reset_reason',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'TcpFsmStateEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'TcpFsmStateEnum',
[], [],
''' TCP FSM state
''',
'state',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-dropped', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of times that a valid session has failed
or been taken down
''',
'total_dropped',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-established', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of times a TCP and BGP connection has been
successfully established
''',
'total_established',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'connection',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.Transport' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.Transport',
False,
[
_MetaInfoClassMember('foreign-host', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Remote address to which the BGP session has
established
''',
'foreign_host',
'Cisco-IOS-XE-bgp-oper', False, [
_MetaInfoClassMember('foreign-host', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Remote address to which the BGP session has
established
''',
'foreign_host',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('foreign-host', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Remote address to which the BGP session has
established
''',
'foreign_host',
'Cisco-IOS-XE-bgp-oper', False),
]),
_MetaInfoClassMember('foreign-port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Remote port used by the peer for the TCP session
''',
'foreign_port',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('local-host', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Local address used for the TCP session
''',
'local_host',
'Cisco-IOS-XE-bgp-oper', False, [
_MetaInfoClassMember('local-host', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Local address used for the TCP session
''',
'local_host',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('local-host', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Local address used for the TCP session
''',
'local_host',
'Cisco-IOS-XE-bgp-oper', False),
]),
_MetaInfoClassMember('local-port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Local TCP port used for TCP session
''',
'local_port',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('mss', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Maximum Data segment size
''',
'mss',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('path-mtu-discovery', ATTRIBUTE, 'bool' , None, None,
[], [],
''' ''',
'path_mtu_discovery',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'transport',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.PrefixActivity.Sent' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.PrefixActivity.Sent',
False,
[
_MetaInfoClassMember('bestpaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as best paths
''',
'bestpaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('current-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Current number of prefixes accepted
''',
'current_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('explicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of times that a prefix has been withdrawn
because it is no longer feasible
''',
'explicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('implicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' number of times that a prefix has been withdrawn
and readvertised
''',
'implicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('multipaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as multipaths
''',
'multipaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Total number of prefixes accepted
''',
'total_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'sent',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.PrefixActivity.Received' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.PrefixActivity.Received',
False,
[
_MetaInfoClassMember('bestpaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as best paths
''',
'bestpaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('current-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Current number of prefixes accepted
''',
'current_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('explicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of times that a prefix has been withdrawn
because it is no longer feasible
''',
'explicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('implicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' number of times that a prefix has been withdrawn
and readvertised
''',
'implicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('multipaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as multipaths
''',
'multipaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Total number of prefixes accepted
''',
'total_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'received',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.PrefixActivity' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.PrefixActivity',
False,
[
_MetaInfoClassMember('received', REFERENCE_CLASS, 'Received' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.PrefixActivity.Received',
[], [],
''' ''',
'received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('sent', REFERENCE_CLASS, 'Sent' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.PrefixActivity.Sent',
[], [],
''' ''',
'sent',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'prefix-activity',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor',
False,
[
_MetaInfoClassMember('afi-safi', REFERENCE_ENUM_CLASS, 'BgpAfiSafiEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpAfiSafiEnum',
[], [],
''' ''',
'afi_safi',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'vrf_name',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('neighbor-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'neighbor_id',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('bgp-neighbor-counters', REFERENCE_CLASS, 'BgpNeighborCounters' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.BgpNeighborCounters',
[], [],
''' ''',
'bgp_neighbor_counters',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('bgp-version', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' BGP version being used to communicate with the
remote router
''',
'bgp_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('connection', REFERENCE_CLASS, 'Connection' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.Connection',
[], [],
''' ''',
'connection',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'description',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('installed-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of installed prefixes
''',
'installed_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('last-read', ATTRIBUTE, 'str' , None, None,
[], [],
                '''                 since BGP last received a message from this neighbor
''',
'last_read',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('last-write', ATTRIBUTE, 'str' , None, None,
[], [],
                '''                 since BGP last sent a message to this neighbor
''',
'last_write',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('link', REFERENCE_ENUM_CLASS, 'BgpLinkEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpLinkEnum',
[], [],
''' ''',
'link',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('negotiated-cap', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Information for bgp neighbor session negotiated
capabilities
''',
'negotiated_cap',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('negotiated-keepalive-timers', REFERENCE_CLASS, 'NegotiatedKeepaliveTimers' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers',
[], [],
''' ''',
'negotiated_keepalive_timers',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefix-activity', REFERENCE_CLASS, 'PrefixActivity' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.PrefixActivity',
[], [],
''' ''',
'prefix_activity',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('session-state', REFERENCE_ENUM_CLASS, 'BgpFsmStateEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpFsmStateEnum',
[], [],
''' ''',
'session_state',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('transport', REFERENCE_CLASS, 'Transport' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.Transport',
[], [],
''' ''',
'transport',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('up-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' How long the bgp session has been up since
                                    the session was established
''',
'up_time',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'neighbor',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors',
False,
[
_MetaInfoClassMember('neighbor', REFERENCE_LIST, 'Neighbor' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor',
[], [],
''' ''',
'neighbor',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'neighbors',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.Prefixes' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.Prefixes',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
                '''                 total prefix entries
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'prefixes',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.Path' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.Path',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
                '''                 total prefix entries
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'path',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.AsPath' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.AsPath',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
                '''                 total prefix entries
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'as-path',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.RouteMap' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.RouteMap',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
                '''                 total prefix entries
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'route-map',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.FilterList' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.FilterList',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
                '''                 total prefix entries
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'filter-list',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.Activities' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.Activities',
False,
[
_MetaInfoClassMember('paths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'paths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('scan-interval', ATTRIBUTE, 'str' , None, None,
[], [],
''' scan interval in second
''',
'scan_interval',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'activities',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary',
False,
[
_MetaInfoClassMember('id', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'id',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('bgp-version', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' ''',
'bgp_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('input-queue', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'input_queue',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('messages-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'messages_received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('messages-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'messages_sent',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('output-queue', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'output_queue',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefixes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'prefixes_received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'BgpFsmStateEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpFsmStateEnum',
[], [],
''' ''',
'state',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('table-version', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'table_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('up-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'up_time',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-neighbor-summary',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries',
False,
[
_MetaInfoClassMember('bgp-neighbor-summary', REFERENCE_LIST, 'BgpNeighborSummary' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary',
[], [],
''' ''',
'bgp_neighbor_summary',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-neighbor-summaries',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily',
False,
[
_MetaInfoClassMember('afi-safi', REFERENCE_ENUM_CLASS, 'BgpAfiSafiEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpAfiSafiEnum',
[], [],
''' ''',
'afi_safi',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'vrf_name',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('activities', REFERENCE_CLASS, 'Activities' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.Activities',
[], [],
''' BGP activity information
''',
'activities',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('as-path', REFERENCE_CLASS, 'AsPath' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.AsPath',
[], [],
''' ''',
'as_path',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('bgp-neighbor-summaries', REFERENCE_CLASS, 'BgpNeighborSummaries' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries',
[], [],
''' Summary of neighbor
''',
'bgp_neighbor_summaries',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('bgp-table-version', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' BGP table version number
''',
'bgp_table_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('filter-list', REFERENCE_CLASS, 'FilterList' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.FilterList',
[], [],
''' ''',
'filter_list',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('path', REFERENCE_CLASS, 'Path' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.Path',
[], [],
''' ''',
'path',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefixes', REFERENCE_CLASS, 'Prefixes' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.Prefixes',
[], [],
''' ''',
'prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('route-map', REFERENCE_CLASS, 'RouteMap' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.RouteMap',
[], [],
''' ''',
'route_map',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('router-id', REFERENCE_UNION, 'str' , None, None,
[], [],
''' ''',
'router_id',
'Cisco-IOS-XE-bgp-oper', False, [
_MetaInfoClassMember('router-id', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' ''',
'router_id',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('router-id', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' ''',
'router_id',
'Cisco-IOS-XE-bgp-oper', False),
]),
_MetaInfoClassMember('routing-table-version', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Routing table version number
''',
'routing_table_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-memory', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'total_memory',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'address-family',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies',
False,
[
_MetaInfoClassMember('address-family', REFERENCE_LIST, 'AddressFamily' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily',
[], [],
''' ''',
'address_family',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'address-families',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState' : {
'meta_info' : _MetaInfoClass('BgpState',
False,
[
_MetaInfoClassMember('address-families', REFERENCE_CLASS, 'AddressFamilies' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies',
[], [],
''' ''',
'address_families',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('neighbors', REFERENCE_CLASS, 'Neighbors' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors',
[], [],
''' ''',
'neighbors',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-state',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
}
_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity.Sent']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity.Received']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.Connection']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.Transport']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor']['meta_info'].parent =_meta_table['BgpState.Neighbors']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.Prefixes']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.Path']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.AsPath']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.RouteMap']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.FilterList']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.Activities']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info'].parent =_meta_table['BgpState.AddressFamilies']['meta_info']
_meta_table['BgpState.Neighbors']['meta_info'].parent =_meta_table['BgpState']['meta_info']
_meta_table['BgpState.AddressFamilies']['meta_info'].parent =_meta_table['BgpState']['meta_info']
| apache-2.0 | 29,991,553,735,175,612 | 49.471443 | 232 | 0.482738 | false |
tcmitchell/geni-tools | src/gcf/geni/am/aggregate.py | 3 | 3386 | #----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from __future__ import absolute_import
from .resource import Resource
class Aggregate(object):
def __init__(self):
self.resources = []
self.containers = {} # of resources, not slivers
def add_resources(self, resources):
self.resources.extend(resources)
def catalog(self, container=None):
if container:
if container in self.containers:
return self.containers[container]
else:
return []
else:
return self.resources
def allocate(self, container, resources):
if container not in self.containers:
self.containers[container] = []
for r in resources:
self.containers[container].append(r)
def deallocate(self, container, resources):
if container and not self.containers.has_key(container):
# Be flexible: if a container is specified but unknown
# ignore the call
return
if container and resources:
# deallocate the given resources from the container
for r in resources:
self.containers[container].remove(r)
elif container:
# deallocate all the resources in the container
container_resources = list(self.containers[container])
for r in container_resources:
self.containers[container].remove(r)
elif resources:
# deallocate the resources from their container
for r in resources:
for c in self.containers.values():
if r in c:
c.remove(r)
# Finally, check if container is empty. If so, delete it.
# Note cache the keys because we will be modifying the dict
# inside the loop
allkeys = self.containers.keys()
for k in allkeys:
if not self.containers[k]:
del self.containers[k]
def stop(self, container):
# Mark the resources as 'SHUTDOWN'
if container in self.containers:
for r in self.containers[container]:
r.status = Resource.STATUS_SHUTDOWN
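# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The Resource constructor call below is hypothetical -- its real
# signature lives in .resource -- but the Aggregate calls mirror the methods
# defined above: allocate() binds resources to a container, and deallocate()
# with no resource list empties (and removes) the container.
#
# agg = Aggregate()
# nodes = [Resource('node-%d' % i) for i in range(3)]  # hypothetical ctor args
# agg.add_resources(nodes)
# agg.allocate('urn:publicid:IDN+slice-1', nodes[:2])  # two resources -> sliver
# print len(agg.catalog('urn:publicid:IDN+slice-1'))   # -> 2
# agg.deallocate('urn:publicid:IDN+slice-1', None)     # empty container removed
# agg.stop('urn:publicid:IDN+slice-1')                 # marks resources SHUTDOWN if present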
| mit | -7,270,206,963,745,568,000 | 39.309524 | 72 | 0.620496 | false |
nicememory/pie | pyglet/pyglet/gl/glxext_mesa.py | 46 | 2050 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''This file is currently hand-coded; I don't have a MESA header file to build
off.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
from pyglet.gl.lib import link_GLX as _link_function
glXSwapIntervalMESA = _link_function('glXSwapIntervalMESA', c_int, [c_int], 'MESA_swap_control')
| apache-2.0 | -3,048,396,163,659,811,000 | 44.555556 | 96 | 0.704878 | false |
jpmpentwater/cvxpy | examples/extensions/ncvx/branch_and_bound.py | 12 | 3946 | import cvxopt
import cvxpy.problems.problem as problem
import cvxpy.settings as s
from boolean import Boolean
def branch(booleans):
bool_vals = (b for b in booleans if not b.fix_values)
# pick *a* boolean variable to branch on
# choose the most ambivalent one (smallest distance to 0.5)
# NOTE: if there are no boolean variables, will never branch
return min(bool_vals, key=lambda x: abs(x.value - 0.5))
def bound(prob, booleans):
# relax boolean constraints
for bool_var in booleans: bool_var.relax()
# solves relaxation
lower_bound = prob._solve()
if isinstance(lower_bound, str):
lower_bound = float('inf')
# round boolean variables and re-solve to obtain upper bound
for bool_var in booleans: bool_var.round()
upper_bound = prob._solve()
if isinstance(upper_bound, str):
upper_bound = float('inf')
return {'gap': upper_bound - lower_bound,
'ub': upper_bound,
'lb': lower_bound,
'obj': upper_bound,
'sol': map(lambda x: x.value, booleans)}
def solve_wrapper(prob, i, booleans, depth, epsilon):
if i > depth: return None
# branch
branch_var = branch(booleans)
# try true branch
branch_var.set(True)
true_branch = bound(prob, booleans)
# try false branch
branch_var.set(False)
false_branch = bound(prob, booleans)
# keep track of best objective so far
if true_branch['obj'] < false_branch['obj']:
solution = true_branch
else:
solution = false_branch
# update the bound
solution['lb'] = min(true_branch['lb'],false_branch['lb'])
solution['ub'] = min(true_branch['ub'],false_branch['ub'])
# check if gap is small enough
solution['gap'] = solution['ub'] - solution['lb']
if solution['gap'] < epsilon:
branch_var.unset()
return solution
# if the gap isn't small enough, we will choose a branch to go down
def take_branch(true_or_false):
branch_var.set(true_or_false)
if true_or_false is True: branch_bools = true_branch['sol']
else: branch_bools = false_branch['sol']
# restore the values into the set of booleans
for b, value in zip(booleans,branch_bools):
b.save_value(value)
return solve_wrapper(prob, i+1, booleans, depth, epsilon)
# partition based on lower bounds
if true_branch['lb'] < false_branch['lb']:
true_subtree = take_branch(True)
false_subtree = take_branch(False)
else:
false_subtree = take_branch(False)
true_subtree = take_branch(True)
# propagate best solution up the tree
if true_subtree and false_subtree:
if true_subtree['obj'] < false_subtree['obj']:
return true_subtree
return false_subtree
if not false_subtree and true_subtree: return true_subtree
if not true_subtree and false_subtree: return false_subtree
# return best guess so far
return solution
def branch_and_bound(self, depth=5, epsilon=1e-3):
objective, constr_map = self.canonicalize()
dims = self._format_for_solver(constr_map, s.ECOS)
variables = self.objective.variables()
for constr in self.constraints:
variables += constr.variables()
booleans = [v for v in variables if isinstance(v, Boolean)]
self.constraints.extend(b._LB <= b for b in booleans)
self.constraints.extend(b <= b._UB for b in booleans)
result = bound(self, booleans)
# check if gap is small enough
if result['gap'] < epsilon:
return result['obj']
result = solve_wrapper(self, 0, booleans, depth, epsilon)
# set the boolean values to the solution
for b, value in zip(booleans, result['sol']):
b.save_value(value)
b.fix_values = cvxopt.matrix(True, b.size)
return result['obj']
# add branch and bound a solution method
problem.Problem.register_solve("branch and bound", branch_and_bound)
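# --- Hedged usage sketch (added; not part of the original module). It assumes
# the Boolean variable class from the sibling boolean module and the legacy
# cvxpy Problem.register_solve()/solve(method=...) hook used just above; the
# objective, depth and epsilon values are arbitrary.
#
# from cvxpy import Minimize, Problem, sum_squares
# from boolean import Boolean
# x = Boolean(3)                                       # three 0/1 decision variables
# prob = Problem(Minimize(sum_squares(x - [0.2, 0.9, 0.4])))
# optval = prob.solve(method="branch and bound", depth=5, epsilon=1e-3)
# print optval, x.value                                # objective and rounded boolean assignment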
| gpl-3.0 | -6,258,885,580,991,344,000 | 31.883333 | 71 | 0.645971 | false |
Julian/urwid | examples/input_test.py | 11 | 3001 | #!/usr/bin/python
#
# Urwid keyboard input test app
# Copyright (C) 2004-2009 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
"""
Keyboard test application
"""
import urwid.curses_display
import urwid.raw_display
import urwid.web_display
import urwid
import sys
if urwid.web_display.is_web_request():
Screen = urwid.web_display.Screen
else:
if len(sys.argv)>1 and sys.argv[1][:1] == "r":
Screen = urwid.raw_display.Screen
else:
Screen = urwid.curses_display.Screen
def key_test():
screen = Screen()
header = urwid.Text("Values from get_input(). Q exits.")
header = urwid.AttrWrap(header,'header')
lw = urwid.SimpleListWalker([])
listbox = urwid.ListBox(lw)
listbox = urwid.AttrWrap(listbox, 'listbox')
top = urwid.Frame(listbox, header)
def input_filter(keys, raw):
if 'q' in keys or 'Q' in keys:
raise urwid.ExitMainLoop
t = []
a = []
for k in keys:
if type(k) == tuple:
out = []
for v in k:
if out:
out += [', ']
out += [('key',repr(v))]
t += ["("] + out + [")"]
else:
t += ["'",('key',k),"' "]
rawt = urwid.Text(", ".join(["%d"%r for r in raw]))
if t:
lw.append(
urwid.Columns([
('weight',2,urwid.Text(t)),
rawt])
)
listbox.set_focus(len(lw)-1,'above')
return keys
loop = urwid.MainLoop(top, [
('header', 'black', 'dark cyan', 'standout'),
('key', 'yellow', 'dark blue', 'bold'),
('listbox', 'light gray', 'black' ),
], screen, input_filter=input_filter)
try:
old = screen.tty_signal_keys('undefined','undefined',
'undefined','undefined','undefined')
loop.run()
finally:
screen.tty_signal_keys(*old)
def main():
urwid.web_display.set_preferences('Input Test')
if urwid.web_display.handle_short_request():
return
key_test()
if '__main__'==__name__ or urwid.web_display.is_web_request():
main()
| lgpl-2.1 | 2,864,991,766,714,025,500 | 28.421569 | 78 | 0.564812 | false |
vasyarv/edx-platform | lms/djangoapps/instructor_task/tasks.py | 12 | 12056 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
At present, these tasks all operate on StudentModule objects in one way or another,
so they share a visitor architecture. Each task defines an "update function" that
takes a module_descriptor, a particular StudentModule object, and xmodule_instance_args.
A task may optionally specify a "filter function" that takes a query for StudentModule
objects, and adds additional filter clauses.
A task also passes through "xmodule_instance_args", that are used to provide
information to our code that instantiates xmodule instances.
The task definition then calls the traversal function, passing in the three arguments
above, along with the id value for an InstructorTask object. The InstructorTask
object contains a 'task_input' row which is a JSON-encoded dict containing
a problem URL and optionally a student. These are used to set up the initial value
of the query for traversing StudentModule objects.
"""
import logging
from functools import partial
from django.conf import settings
from django.utils.translation import ugettext_noop
from celery import task
from bulk_email.tasks import perform_delegate_email_batches
from instructor_task.tasks_helper import (
run_main_task,
BaseInstructorTask,
perform_module_state_update,
rescore_problem_module_state,
reset_attempts_module_state,
delete_problem_module_state,
upload_grades_csv,
upload_problem_grade_report,
upload_students_csv,
cohort_students_and_upload,
upload_enrollment_report,
upload_may_enroll_csv,
upload_exec_summary_report,
generate_students_certificates,
upload_proctored_exam_results_report
)
TASK_LOG = logging.getLogger('edx.celery.task')
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def rescore_problem(entry_id, xmodule_instance_args):
"""Rescores a problem in a course, for all students or one specific student.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
'student': the identifier (username or email) of a particular user whose
problem submission should be rescored. If not specified, all problem
submissions for the problem will be rescored.
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('rescored')
update_fcn = partial(rescore_problem_module_state, xmodule_instance_args)
def filter_fcn(modules_to_update):
"""Filter that matches problems which are marked as being done"""
return modules_to_update.filter(state__contains='"done": true')
visit_fcn = partial(perform_module_state_update, update_fcn, filter_fcn)
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def reset_problem_attempts(entry_id, xmodule_instance_args):
"""Resets problem attempts to zero for a particular problem for all students in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('reset')
update_fcn = partial(reset_attempts_module_state, xmodule_instance_args)
visit_fcn = partial(perform_module_state_update, update_fcn, None)
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def delete_problem_state(entry_id, xmodule_instance_args):
"""Deletes problem state entirely for all students on a particular problem in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('deleted')
update_fcn = partial(delete_problem_module_state, xmodule_instance_args)
visit_fcn = partial(perform_module_state_update, update_fcn, None)
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def send_bulk_course_email(entry_id, _xmodule_instance_args):
"""Sends emails to recipients enrolled in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'email_id': the full URL to the problem to be rescored. (required)
`_xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance. This is unused here.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('emailed')
visit_fcn = perform_delegate_email_batches
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_grades_csv(entry_id, xmodule_instance_args):
"""
Grade a course and push the results to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('graded')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
task_fn = partial(upload_grades_csv, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_problem_grade_report(entry_id, xmodule_instance_args):
"""
Generate a CSV for a course containing all students' problem
grades and push the results to an S3 bucket for download.
"""
# Translators: This is a past-tense phrase that is inserted into task progress messages as {action}.
action_name = ugettext_noop('problem distribution graded')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
task_fn = partial(upload_problem_grade_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_students_features_csv(entry_id, xmodule_instance_args):
"""
Compute student profile information for a course and upload the
CSV to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('generated')
task_fn = partial(upload_students_csv, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def enrollment_report_features_csv(entry_id, xmodule_instance_args):
"""
Compute student profile information for a course and upload the
CSV to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('generating_enrollment_report')
task_fn = partial(upload_enrollment_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def exec_summary_report_csv(entry_id, xmodule_instance_args):
"""
Compute executive summary report for a course and upload the
Html generated report to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = 'generating_exec_summary_report'
task_fn = partial(upload_exec_summary_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def proctored_exam_results_csv(entry_id, xmodule_instance_args):
"""
Compute proctored exam results report for a course and upload the
CSV for download.
"""
action_name = 'generating_proctored_exam_results_report'
task_fn = partial(upload_proctored_exam_results_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_may_enroll_csv(entry_id, xmodule_instance_args):
"""
Compute information about invited students who have not enrolled
in a given course yet and upload the CSV to an S3 bucket for
download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('generated')
task_fn = partial(upload_may_enroll_csv, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def generate_certificates(entry_id, xmodule_instance_args):
"""
Grade students and generate certificates.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('certificates generated')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
task_fn = partial(generate_students_certificates, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=E1102
def cohort_students(entry_id, xmodule_instance_args):
"""
Cohort students in bulk, and upload the results.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
# An example of such a message is: "Progress: {action} {succeeded} of {attempted} so far"
action_name = ugettext_noop('cohorted')
task_fn = partial(cohort_students_and_upload, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
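# --- Illustrative sketch (added; not part of the original module). It shows the
# minimal shape a new module-state task would take under the visitor pattern
# described in the module docstring; `my_update_fcn` is hypothetical and would
# share the signature of e.g. reset_attempts_module_state.
#
# @task(base=BaseInstructorTask)  # pylint: disable=not-callable
# def my_problem_task(entry_id, xmodule_instance_args):
#     """Apply my_update_fcn to each matching StudentModule row."""
#     action_name = ugettext_noop('processed')
#     update_fcn = partial(my_update_fcn, xmodule_instance_args)
#     visit_fcn = partial(perform_module_state_update, update_fcn, None)
#     return run_main_task(entry_id, visit_fcn, action_name)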
| agpl-3.0 | -522,893,223,179,021,700 | 44.323308 | 112 | 0.73565 | false |
MaximLich/oppia | core/tests/performance_tests/splash_test.py | 16 | 1597 | # Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance tests for the splash page."""
from core.tests.performance_tests import base
from core.tests.performance_tests import test_config
class SplashPagePerformanceTest(base.TestBase):
"""Performance tests for the splash page."""
PAGE_KEY = test_config.PAGE_KEY_SPLASH
def setUp(self):
super(SplashPagePerformanceTest, self).setUp()
page_config = test_config.TEST_DATA[self.PAGE_KEY]
self._set_page_config(page_config)
self._initialize_data_fetcher()
self._load_page_to_cache_server_resources()
def test_page_size_under_specified_limit(self):
self._test_total_page_size()
def test_page_size_under_specified_limit_for_cached_session(self):
self._test_total_page_size_for_cached_session()
def test_page_loads_under_specified_limit(self):
self._test_page_load_time()
def test_page_loads_under_specified_limit_cached_session(self):
self._test_page_load_time_for_cached_session()
| apache-2.0 | -8,230,573,262,685,003,000 | 35.295455 | 74 | 0.722605 | false |
hmgaudecker/econ-project-templates | {{cookiecutter.project_slug}}/.mywaflib/waflib/Tools/cs.py | 55 | 6397 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2018 (ita)
"""
C# support. A simple example::
def configure(conf):
conf.load('cs')
def build(bld):
bld(features='cs', source='main.cs', gen='foo')
Note that the configuration may compile C# snippets::
FRAG = '''
namespace Moo {
public class Test { public static int Main(string[] args) { return 0; } }
}'''
def configure(conf):
conf.check(features='cs', fragment=FRAG, compile_filename='test.cs', gen='test.exe',
bintype='exe', csflags=['-pkg:gtk-sharp-2.0'], msg='Checking for Gtksharp support')
"""
from waflib import Utils, Task, Options, Errors
from waflib.TaskGen import before_method, after_method, feature
from waflib.Tools import ccroot
from waflib.Configure import conf
ccroot.USELIB_VARS['cs'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES'])
ccroot.lib_patterns['csshlib'] = ['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
"""
Create a C# task bound to the attribute *cs_task*. There can be only one C# task by task generator.
"""
cs_nodes = []
no_nodes = []
for x in self.to_nodes(self.source):
if x.name.endswith('.cs'):
cs_nodes.append(x)
else:
no_nodes.append(x)
self.source = no_nodes
bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe')
self.cs_task = tsk = self.create_task('mcs', cs_nodes, self.path.find_or_declare(self.gen))
tsk.env.CSTYPE = '/target:%s' % bintype
tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath()
self.env.append_value('CSFLAGS', '/platform:%s' % getattr(self, 'platform', 'anycpu'))
inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}')
if inst_to:
# note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically
mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644)
self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
"""
C# applications honor the **use** keyword::
def build(bld):
bld(features='cs', source='My.cs', bintype='library', gen='my.dll', name='mylib')
bld(features='cs', source='Hi.cs', includes='.', bintype='exe', gen='hi.exe', use='mylib', name='hi')
"""
names = self.to_list(getattr(self, 'use', []))
get = self.bld.get_tgen_by_name
for x in names:
try:
y = get(x)
except Errors.WafError:
self.env.append_value('CSFLAGS', '/reference:%s' % x)
continue
y.post()
tsk = getattr(y, 'cs_task', None) or getattr(y, 'link_task', None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r' % self)
self.cs_task.dep_nodes.extend(tsk.outputs) # dependency
self.cs_task.set_run_after(tsk) # order (redundant, the order is inferred from the nodes inputs/outputs)
self.env.append_value('CSFLAGS', '/reference:%s' % tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs', 'use_cs')
def debug_cs(self):
"""
The C# targets may create .mdb or .pdb files::
def build(bld):
bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdebug='full')
# csdebug is a value in (True, 'full', 'pdbonly')
"""
csdebug = getattr(self, 'csdebug', self.env.CSDEBUG)
if not csdebug:
return
node = self.cs_task.outputs[0]
if self.env.CS_NAME == 'mono':
out = node.parent.find_or_declare(node.name + '.mdb')
else:
out = node.change_ext('.pdb')
self.cs_task.outputs.append(out)
if getattr(self, 'install_task', None):
self.pdb_install_task = self.add_install_files(
install_to=self.install_task.install_to, install_from=out)
if csdebug == 'pdbonly':
val = ['/debug+', '/debug:pdbonly']
elif csdebug == 'full':
val = ['/debug+', '/debug:full']
else:
val = ['/debug-']
self.env.append_value('CSFLAGS', val)
@feature('cs')
@after_method('debug_cs')
def doc_cs(self):
"""
The C# targets may create .xml documentation files::
def build(bld):
bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdoc=True)
# csdoc is a boolean value
"""
csdoc = getattr(self, 'csdoc', self.env.CSDOC)
if not csdoc:
return
node = self.cs_task.outputs[0]
out = node.change_ext('.xml')
self.cs_task.outputs.append(out)
if getattr(self, 'install_task', None):
self.doc_install_task = self.add_install_files(
install_to=self.install_task.install_to, install_from=out)
self.env.append_value('CSFLAGS', '/doc:%s' % out.abspath())
class mcs(Task.Task):
"""
Compile C# files
"""
color = 'YELLOW'
run_str = '${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
def split_argfile(self, cmd):
inline = [cmd[0]]
infile = []
for x in cmd[1:]:
# csc doesn't want /noconfig in @file
if x.lower() == '/noconfig':
inline.append(x)
else:
infile.append(self.quote_flag(x))
return (inline, infile)
def configure(conf):
"""
Find a C# compiler, set the variable MCS for the compiler and CS_NAME (mono or csc)
"""
csc = getattr(Options.options, 'cscbinary', None)
if csc:
conf.env.MCS = csc
conf.find_program(['csc', 'mcs', 'gmcs'], var='MCS')
conf.env.ASS_ST = '/r:%s'
conf.env.RES_ST = '/resource:%s'
conf.env.CS_NAME = 'csc'
if str(conf.env.MCS).lower().find('mcs') > -1:
conf.env.CS_NAME = 'mono'
def options(opt):
"""
Add a command-line option for the configuration::
$ waf configure --with-csc-binary=/foo/bar/mcs
"""
opt.add_option('--with-csc-binary', type='string', dest='cscbinary')
class fake_csshlib(Task.Task):
"""
Task used for reading a foreign .net assembly and adding the dependency on it
"""
color = 'YELLOW'
inst_to = None
def runnable_status(self):
return Task.SKIP_ME
@conf
def read_csshlib(self, name, paths=[]):
"""
Read a foreign .net assembly for the *use* system::
def build(bld):
bld.read_csshlib('ManagedLibrary.dll', paths=[bld.env.mylibrarypath])
bld(features='cs', source='Hi.cs', bintype='exe', gen='hi.exe', use='ManagedLibrary.dll')
:param name: Name of the library
:type name: string
:param paths: Folders in which the library may be found
:type paths: list of string
:return: A task generator having the feature *fake_lib* which will call :py:func:`waflib.Tools.ccroot.process_lib`
:rtype: :py:class:`waflib.TaskGen.task_gen`
"""
return self(name=name, features='fake_lib', lib_paths=paths, lib_type='csshlib')
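# --- Illustrative wscript sketch (added; not part of the original tool). It only
# combines the attributes documented in the feature methods above; project, file
# and assembly names are invented.
#
# def options(opt):
#     opt.load('cs')
# def configure(conf):
#     conf.load('cs')
# def build(bld):
#     bld(features='cs', source='Lib.cs', bintype='library', gen='lib.dll',
#         csdebug='full', csdoc=True, name='lib')
#     bld(features='cs', source='Main.cs', bintype='exe', gen='app.exe',
#         use='lib', name='app')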
| bsd-3-clause | -4,644,363,916,836,639,000 | 29.317536 | 115 | 0.663436 | false |
dhruve/spark | examples/src/main/python/ml/string_indexer_example.py | 123 | 1402 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import StringIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("StringIndexerExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame(
[(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
["id", "category"])
indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
indexed = indexer.fit(df).transform(df)
indexed.show()
# $example off$
spark.stop()
| apache-2.0 | -2,375,105,611,489,932,000 | 33.195122 | 75 | 0.691155 | false |
piyushroshan/xen-4.3 | tools/python/logging/logging-0.4.9.2/logging/handlers.py | 42 | 28606 | # Copyright 2001-2004 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.
Should work under Python versions >= 1.5.2, except that source line
information is not available unless 'sys._getframe()' is.
Copyright (C) 2001-2004 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, socket, types, os, string, cPickle, struct, time
from SocketServer import ThreadingTCPServer, StreamRequestHandler
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
class RotatingFileHandler(logging.FileHandler):
def __init__(self, filename, mode="a", maxBytes=0, backupCount=0):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
logging.FileHandler.__init__(self, filename, mode)
self.maxBytes = maxBytes
self.backupCount = backupCount
if maxBytes > 0:
self.mode = "a"
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
self.stream.close()
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = "%s.%d" % (self.baseFilename, i)
dfn = "%s.%d" % (self.baseFilename, i + 1)
if os.path.exists(sfn):
#print "%s -> %s" % (sfn, dfn)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.baseFilename + ".1"
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
#print "%s -> %s" % (self.baseFilename, dfn)
self.stream = open(self.baseFilename, "w")
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
self.doRollover()
logging.FileHandler.emit(self, record)
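# Illustrative sketch (not part of the original module): a RotatingFileHandler
# that rolls "app.log" over near 50 KB and keeps three numbered backups.  The
# file name and sizes are arbitrary example values.
def _example_rotating_file_handler():
    logger = logging.getLogger("rotating-example")
    handler = RotatingFileHandler("app.log", maxBytes=50 * 1024, backupCount=3)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    for i in range(1000):
        # once app.log approaches maxBytes it is renamed to app.log.1, etc.
        logger.warning("sample record %d" % i)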
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
        When the attribute 'closeOnError' is set to 1, a socket error
        causes the socket to be silently closed and then reopened on the
        next logging call; it defaults to 0 here.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = 0
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port))
return s
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = 1
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
if hasattr(self.sock, "sendall"):
self.sock.sendall(s)
else:
sentsofar = 0
left = len(s)
while left > 0:
sent = self.sock.send(s[sentsofar:])
sentsofar = sentsofar + sent
left = left - sent
except socket.error:
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
dummy = self.format(record) # just to get traceback text into record.exc_text
record.exc_info = None # to avoid Unpickleable error
s = cPickle.dumps(record.__dict__, 1)
if ei:
record.exc_info = ei # for next handler
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
if self.sock:
self.sock.close()
self.sock = None
logging.Handler.close(self)
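# Illustrative sketch (not part of the original module): the receiving end of a
# SocketHandler stream.  makePickle() writes a 4-byte big-endian length prefix
# (">L") followed by the pickled LogRecord dict, and logging.makeLogRecord()
# rebuilds a LogRecord from that dict.  'conn' is assumed to be a connected
# stream socket; a robust receiver would loop until all slen bytes arrive.
def _example_receive_one_record(conn):
    slen = struct.unpack(">L", conn.recv(4))[0]
    data = conn.recv(slen)
    record = logging.makeLogRecord(cPickle.loads(data))
    logging.getLogger(record.name).handle(record)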
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = 0
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER):
"""
Initialize a handler.
        If address is specified as a string, a UNIX socket is used.
If facility is not specified, LOG_USER is used.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
if type(address) == types.StringType:
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
# syslog may require either DGRAM or STREAM sockets
try:
self.socket.connect(address)
except socket.error:
self.socket.close()
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(address)
self.unixsocket = 1
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.unixsocket = 0
self.formatter = None
# curious: when talking to the unix-domain '/dev/log' socket, a
# zero-terminator seems to be required. this string is placed
# into a class variable so that it can be overridden if
# necessary.
log_format_string = '<%d>%s\000'
def encodePriority (self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if type(facility) == types.StringType:
facility = self.facility_names[facility]
if type(priority) == types.StringType:
priority = self.priority_names[priority]
return (facility << 3) | priority
def close (self):
"""
Closes the socket.
"""
if self.unixsocket:
self.socket.close()
logging.Handler.close(self)
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
msg = self.format(record)
"""
We need to convert record level to lowercase, maybe this will
change in the future.
"""
msg = self.log_format_string % (
self.encodePriority(self.facility,
string.lower(record.levelname)),
msg)
try:
if self.unixsocket:
self.socket.send(msg)
else:
self.socket.sendto(msg, self.address)
except:
self.handleError(record)
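# Illustrative sketch (not part of the original module): logging to the local
# syslog daemon over its unix-domain socket.  "/dev/log" is the usual Linux
# path (adjust per platform), and the priority sent on the wire is
# (facility << 3) | level, e.g. (LOG_DAEMON << 3) | LOG_ERR == 27.
def _example_syslog_handler():
    logger = logging.getLogger("syslog-example")
    logger.addHandler(SysLogHandler(address="/dev/log",
                                    facility=SysLogHandler.LOG_DAEMON))
    logger.error("disk almost full")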
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument.
"""
logging.Handler.__init__(self)
if type(mailhost) == types.TupleType:
host, port = mailhost
self.mailhost = host
self.mailport = port
else:
self.mailhost = mailhost
self.mailport = None
self.fromaddr = fromaddr
if type(toaddrs) == types.StringType:
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def date_time(self):
"""Return the current date and time formatted for a MIME header."""
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time())
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
string.join(self.toaddrs, ","),
self.getSubject(record),
self.date_time(), msg)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except:
self.handleError(record)
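# Illustrative sketch (not part of the original module): mail CRITICAL records
# to an administrator.  Host names and addresses are example values; a
# non-standard SMTP port would be given as mailhost=("mail.example.com", 2525).
def _example_smtp_handler():
    handler = SMTPHandler("mail.example.com", "[email protected]",
                          ["[email protected]"], "Application failure")
    handler.setLevel(logging.CRITICAL)
    logging.getLogger("smtp-example").addHandler(handler)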
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print "The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available."
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET"):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = string.upper(method)
if method not in ["GET", "POST"]:
raise ValueError, "method must be GET or POST"
self.host = host
self.url = url
self.method = method
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as an URL-encoded dictionary
"""
try:
import httplib, urllib
h = httplib.HTTP(self.host)
url = self.url
data = urllib.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (string.find(url, '?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
if self.method == "POST":
h.putheader("Content-length", str(len(data)))
h.endheaders()
if self.method == "POST":
h.send(data)
h.getreply() #can't do anything with the result
except:
self.handleError(record)
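# Illustrative sketch (not part of the original module): POST each record to a
# web collector; the form body is whatever mapLogRecord() returns.  Host and
# URL are example values.
def _example_http_handler():
    handler = HTTPHandler("logs.example.com:8080", "/ingest", method="POST")
    logging.getLogger("http-example").addHandler(handler)
    logging.getLogger("http-example").warning("sent as an urlencoded dict")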
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.buffer = []
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
"""
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
self.target = None
BufferingHandler.close(self)
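# Illustrative sketch (not part of the original module): buffer DEBUG records
# in memory and only push them to a FileHandler once an ERROR-or-worse record
# arrives or 100 records accumulate.  The file name is an example value.
def _example_memory_handler():
    target = logging.FileHandler("flushed.log")
    buffered = MemoryHandler(100, flushLevel=logging.ERROR, target=target)
    logger = logging.getLogger("memory-example")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(buffered)
    logger.debug("held in the buffer for now")
    logger.error("this record triggers a flush to flushed.log")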
| gpl-2.0 | 3,200,678,611,683,470,300 | 35.348158 | 87 | 0.579808 | false |
raags/ansible-modules-core | windows/win_ping.py | 208 | 1376 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_ping
version_added: "1.7"
short_description: A windows version of the classic ping module.
description:
- Checks management connectivity of a windows host
options:
data:
description:
- Alternate data to return instead of 'pong'
required: false
default: 'pong'
aliases: []
author: "Chris Church (@cchurch)"
'''
EXAMPLES = '''
# Test connectivity to a windows host
ansible winserver -m win_ping
# Example from an Ansible Playbook
- action: win_ping
'''
| gpl-3.0 | 5,008,684,289,495,437,000 | 27.666667 | 70 | 0.725291 | false |
lurch/python-gpiozero | gpiozero/pins/pigpiod.py | 1 | 9761 | from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import warnings
import pigpio
import os
from . import Pin
from .data import pi_info
from ..exc import (
PinInvalidFunction,
PinSetInput,
PinFixedPull,
PinInvalidPull,
PinInvalidBounce,
PinInvalidState,
PinNonPhysical,
PinNoPins,
)
class PiGPIOPin(Pin):
"""
Uses the `pigpio`_ library to interface to the Pi's GPIO pins. The pigpio
library relies on a daemon (``pigpiod``) to be running as root to provide
access to the GPIO pins, and communicates with this daemon over a network
socket.
While this does mean only the daemon itself should control the pins, the
architecture does have several advantages:
* Pins can be remote controlled from another machine (the other
machine doesn't even have to be a Raspberry Pi; it simply needs the
`pigpio`_ client library installed on it)
* The daemon supports hardware PWM via the DMA controller
* Your script itself doesn't require root privileges; it just needs to
be able to communicate with the daemon
You can construct pigpiod pins manually like so::
from gpiozero.pins.pigpiod import PiGPIOPin
from gpiozero import LED
led = LED(PiGPIOPin(12))
This is particularly useful for controlling pins on a remote machine. To
accomplish this simply specify the host (and optionally port) when
constructing the pin::
from gpiozero.pins.pigpiod import PiGPIOPin
from gpiozero import LED
from signal import pause
led = LED(PiGPIOPin(12, host='192.168.0.2'))
.. note::
In some circumstances, especially when playing with PWM, it does appear
to be possible to get the daemon into "unusual" states. We would be
most interested to hear any bug reports relating to this (it may be a
bug in our pin implementation). A workaround for now is simply to
restart the ``pigpiod`` daemon.
.. _pigpio: http://abyz.co.uk/rpi/pigpio/
"""
_CONNECTIONS = {} # maps (host, port) to (connection, pi_info)
_PINS = {}
GPIO_FUNCTIONS = {
'input': pigpio.INPUT,
'output': pigpio.OUTPUT,
'alt0': pigpio.ALT0,
'alt1': pigpio.ALT1,
'alt2': pigpio.ALT2,
'alt3': pigpio.ALT3,
'alt4': pigpio.ALT4,
'alt5': pigpio.ALT5,
}
GPIO_PULL_UPS = {
'up': pigpio.PUD_UP,
'down': pigpio.PUD_DOWN,
'floating': pigpio.PUD_OFF,
}
GPIO_EDGES = {
'both': pigpio.EITHER_EDGE,
'rising': pigpio.RISING_EDGE,
'falling': pigpio.FALLING_EDGE,
}
GPIO_FUNCTION_NAMES = {v: k for (k, v) in GPIO_FUNCTIONS.items()}
GPIO_PULL_UP_NAMES = {v: k for (k, v) in GPIO_PULL_UPS.items()}
GPIO_EDGES_NAMES = {v: k for (k, v) in GPIO_EDGES.items()}
def __new__(
cls, number, host=os.getenv('PIGPIO_ADDR', 'localhost'),
port=int(os.getenv('PIGPIO_PORT', 8888))):
try:
return cls._PINS[(host, port, number)]
except KeyError:
self = super(PiGPIOPin, cls).__new__(cls)
cls.pi_info(host, port) # implicitly creates connection
self._connection, self._pi_info = cls._CONNECTIONS[(host, port)]
try:
self._pi_info.physical_pin('GPIO%d' % number)
except PinNoPins:
warnings.warn(
PinNonPhysical(
'no physical pins exist for GPIO%d' % number))
self._host = host
self._port = port
self._number = number
self._pull = 'up' if self._pi_info.pulled_up('GPIO%d' % number) else 'floating'
self._pwm = False
self._bounce = None
self._when_changed = None
self._callback = None
self._edges = pigpio.EITHER_EDGE
try:
self._connection.set_mode(self._number, pigpio.INPUT)
except pigpio.error as e:
raise ValueError(e)
self._connection.set_pull_up_down(self._number, self.GPIO_PULL_UPS[self._pull])
self._connection.set_glitch_filter(self._number, 0)
cls._PINS[(host, port, number)] = self
return self
def __repr__(self):
if self._host == 'localhost':
return "GPIO%d" % self._number
else:
return "GPIO%d on %s:%d" % (self._number, self._host, self._port)
@property
def host(self):
return self._host
@property
def port(self):
return self._port
@property
def number(self):
return self._number
def close(self):
# If we're shutting down, the connection may have disconnected itself
# already. Unfortunately, the connection's "connected" property is
# rather buggy - disconnecting doesn't set it to False! So we're
# naughty and check an internal variable instead...
if self._connection.sl.s is not None:
self.frequency = None
self.when_changed = None
self.function = 'input'
self.pull = 'up' if self._pi_info.pulled_up('GPIO%d' % self.number) else 'floating'
def _get_function(self):
return self.GPIO_FUNCTION_NAMES[self._connection.get_mode(self._number)]
def _set_function(self, value):
if value != 'input':
self._pull = 'floating'
try:
self._connection.set_mode(self._number, self.GPIO_FUNCTIONS[value])
except KeyError:
raise PinInvalidFunction('invalid function "%s" for pin %r' % (value, self))
def _get_state(self):
if self._pwm:
return (
self._connection.get_PWM_dutycycle(self._number) /
self._connection.get_PWM_range(self._number)
)
else:
return bool(self._connection.read(self._number))
def _set_state(self, value):
if self._pwm:
try:
value = int(value * self._connection.get_PWM_range(self._number))
if value != self._connection.get_PWM_dutycycle(self._number):
self._connection.set_PWM_dutycycle(self._number, value)
except pigpio.error:
raise PinInvalidState('invalid state "%s" for pin %r' % (value, self))
elif self.function == 'input':
raise PinSetInput('cannot set state of pin %r' % self)
else:
# write forces pin to OUTPUT, hence the check above
self._connection.write(self._number, bool(value))
def _get_pull(self):
return self._pull
def _set_pull(self, value):
if self.function != 'input':
raise PinFixedPull('cannot set pull on non-input pin %r' % self)
if value != 'up' and self._pi_info.pulled_up('GPIO%d' % self._number):
raise PinFixedPull('%r has a physical pull-up resistor' % self)
try:
self._connection.set_pull_up_down(self._number, self.GPIO_PULL_UPS[value])
self._pull = value
except KeyError:
raise PinInvalidPull('invalid pull "%s" for pin %r' % (value, self))
def _get_frequency(self):
if self._pwm:
return self._connection.get_PWM_frequency(self._number)
return None
def _set_frequency(self, value):
if not self._pwm and value is not None:
self._connection.set_PWM_frequency(self._number, value)
self._connection.set_PWM_range(self._number, 10000)
self._connection.set_PWM_dutycycle(self._number, 0)
self._pwm = True
elif self._pwm and value is not None:
if value != self._connection.get_PWM_frequency(self._number):
self._connection.set_PWM_frequency(self._number, value)
self._connection.set_PWM_range(self._number, 10000)
elif self._pwm and value is None:
self._connection.write(self._number, 0)
self._pwm = False
def _get_bounce(self):
return None if not self._bounce else self._bounce / 1000000
def _set_bounce(self, value):
if value is None:
value = 0
elif value < 0:
raise PinInvalidBounce('bounce must be 0 or greater')
self._connection.set_glitch_filter(self._number, int(value * 1000000))
def _get_edges(self):
return self.GPIO_EDGES_NAMES[self._edges]
def _set_edges(self, value):
f = self.when_changed
self.when_changed = None
try:
self._edges = self.GPIO_EDGES[value]
finally:
self.when_changed = f
def _get_when_changed(self):
if self._callback is None:
return None
return self._callback.callb.func
def _set_when_changed(self, value):
if self._callback is not None:
self._callback.cancel()
self._callback = None
if value is not None:
self._callback = self._connection.callback(
self._number, self._edges,
lambda gpio, level, tick: value())
@classmethod
def pi_info(
cls, host=os.getenv('PIGPIO_ADDR', 'localhost'),
port=int(os.getenv('PIGPIO_PORT', 8888))):
try:
connection, info = cls._CONNECTIONS[(host, port)]
except KeyError:
connection = pigpio.pi(host, port)
revision = '%04x' % connection.get_hardware_revision()
info = pi_info(revision)
cls._CONNECTIONS[(host, port)] = (connection, info)
return info
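# Illustrative sketch (not part of the original module): the PIGPIO_ADDR /
# PIGPIO_PORT environment variables read above mean a remote daemon can also
# be selected without passing host= explicitly (e.g.
# "PIGPIO_ADDR=192.168.0.2 python my_script.py").  The pin number and address
# below are example values.
def _example_remote_led():
    from gpiozero import LED
    led = LED(PiGPIOPin(12, host='192.168.0.2'))
    led.blink()
    return led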
| bsd-3-clause | 728,298,450,364,288,800 | 34.111511 | 95 | 0.580986 | false |
moehle/cvxpy_codegen | cvxpy_codegen/atoms/kron.py | 1 | 1098 | """
Copyright 2017 Nicholas Moehle
This file is part of CVXPY-CODEGEN.
CVXPY-CODEGEN is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY-CODEGEN is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY-CODEGEN. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy_codegen.param.expr_data import AtomData
import scipy.sparse as sp
def getdata_kron(expr, arg_data):
return [AtomData(expr, arg_data,
macro_name = "kron",
sparsity = sp.kron(arg_data[0].sparsity, arg_data[1].sparsity),
work_int = arg_data[0].sparsity.shape[1],
work_float = arg_data[0].sparsity.shape[1])]
| gpl-3.0 | 5,520,203,606,025,942,000 | 38.214286 | 84 | 0.70765 | false |
woobe/h2o | py/testdir_single_jvm/test_GLM2_tweedie.py | 2 | 2449 | import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_import as h2i, h2o_util
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_tweedie(self):
h2o.beta_features = True
csvFilename = "AutoClaim.csv"
csvPathname = 'standard/' + csvFilename
print "\nStarting", csvPathname
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put')
# columns start at 0
# regress: glm(CLM_AMT ~ CAR_USE + REVOLKED + GENDER + AREA + MARRIED + CAR_TYPE, data=AutoClaim, family=tweedie(1.34))
coefs = [7, 13, 20, 27, 21, 11]
y = "4"
ignored_cols = h2o_cmd.createIgnoredCols(key=parseResult['destination_key'], cols=coefs, response=y)
# sapply(c('CLM_AMT', 'CAR_USE', 'REVOLKED', 'GENDER', 'AREA', 'MARRIED', 'CAR_TYPE'), function(x) which(x==colnames(AutoClaim)) - 1)
kwargs = {
'family': 'tweedie',
'tweedie_variance_power': 1.36,
'response': y,
'ignored_cols' : ignored_cols,
'max_iter': 10,
'lambda': 0,
'alpha': 0,
'n_folds': 0,
'beta_epsilon': 1e-4,
}
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=15, **kwargs)
coefficientsExpected = {'Intercept': 0, 'GENDER.M': 0.0014842488782470984, 'CAR_TYPE.Sports Car': 0.07786742314454961, 'MARRIED.Yes': 0.0007748552195851079, 'CAR_TYPE.SUV': 0.07267702940249621, 'CAR_TYPE.Pickup': 0.04952083408742968, 'CAR_TYPE.Van': 0.026422137690691405, 'CAR_TYPE.Sedan': 0.05128350794060489, 'CAR_USE.Private': -0.03050194832853935, 'REVOLKED.Yes': -0.05095942737408699}
deltaExpected = 0.05
(warnings, coefficients, intercept) = h2o_glm.simpleCheckGLM(self, glm, None,
coefficientsExpected=coefficientsExpected, deltaExpected=deltaExpected, **kwargs)
print 'coefficients: %s' % (str(coefficients))
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | -7,827,875,249,452,787,000 | 41.224138 | 397 | 0.600245 | false |
dfalt974/SickRage | lib/github/tests/Issue.py | 7 | 9092 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Stuart Glaser <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import datetime
class Issue(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.repo = self.g.get_user().get_repo("PyGithub")
self.issue = self.repo.get_issue(28)
def testAttributes(self):
self.assertEqual(self.issue.assignee.login, "jacquev6")
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser"])
self.assertEqual(self.issue.body, "Body edited by PyGithub")
self.assertEqual(self.issue.closed_at, datetime.datetime(2012, 5, 26, 14, 59, 33))
self.assertEqual(self.issue.closed_by.login, "jacquev6")
self.assertEqual(self.issue.comments, 0)
self.assertEqual(self.issue.created_at, datetime.datetime(2012, 5, 19, 10, 38, 23))
self.assertEqual(self.issue.html_url, "https://github.com/jacquev6/PyGithub/issues/28")
self.assertEqual(self.issue.id, 4653757)
self.assertListKeyEqual(self.issue.labels, lambda l: l.name, ["Bug", "Project management", "Question"])
self.assertEqual(self.issue.milestone.title, "Version 0.4")
self.assertEqual(self.issue.number, 28)
self.assertEqual(self.issue.pull_request.diff_url, None)
self.assertEqual(self.issue.pull_request.patch_url, None)
self.assertEqual(self.issue.pull_request.html_url, None)
self.assertEqual(self.issue.state, "closed")
self.assertEqual(self.issue.title, "Issue created by PyGithub")
self.assertEqual(self.issue.updated_at, datetime.datetime(2012, 5, 26, 14, 59, 33))
self.assertEqual(self.issue.url, "https://api.github.com/repos/jacquev6/PyGithub/issues/28")
self.assertEqual(self.issue.user.login, "jacquev6")
self.assertEqual(self.issue.repository.name, "PyGithub")
# test __repr__() based on this attributes
self.assertEqual(self.issue.__repr__(), 'Issue(title="Issue created by PyGithub", number=28)')
def testEditWithoutParameters(self):
self.issue.edit()
def testEditWithAllParameters(self):
user = self.g.get_user("jacquev6")
milestone = self.repo.get_milestone(2)
self.issue.edit("Title edited by PyGithub", "Body edited by PyGithub", user, "open", milestone, ["Bug"], ["jacquev6", "stuglaser"])
self.assertEqual(self.issue.assignee.login, "jacquev6")
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser"])
self.assertEqual(self.issue.body, "Body edited by PyGithub")
self.assertEqual(self.issue.state, "open")
self.assertEqual(self.issue.title, "Title edited by PyGithub")
self.assertListKeyEqual(self.issue.labels, lambda l: l.name, ["Bug"])
def testEditResetMilestone(self):
self.assertEqual(self.issue.milestone.title, "Version 0.4")
self.issue.edit(milestone=None)
self.assertEqual(self.issue.milestone, None)
def testEditResetAssignee(self):
self.assertEqual(self.issue.assignee.login, "jacquev6")
self.issue.edit(assignee=None)
self.assertEqual(self.issue.assignee, None)
def testCreateComment(self):
comment = self.issue.create_comment("Comment created by PyGithub")
self.assertEqual(comment.id, 5808311)
def testGetComments(self):
self.assertListKeyEqual(self.issue.get_comments(), lambda c: c.user.login, ["jacquev6", "roskakori"])
def testGetCommentsSince(self):
self.assertListKeyEqual(self.issue.get_comments(datetime.datetime(2012, 5, 26, 13, 59, 33)), lambda c: c.user.login, ["jacquev6", "roskakori"])
def testGetEvents(self):
self.assertListKeyEqual(self.issue.get_events(), lambda e: e.id, [15819975, 15820048])
def testGetLabels(self):
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
def testAddAndRemoveAssignees(self):
user1 = "jayfk"
user2 = self.g.get_user("jzelinskie")
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser"])
self.issue.add_to_assignees(user1, user2)
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser", "jayfk", "jzelinskie"])
self.issue.remove_from_assignees(user1, user2)
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser"])
def testAddAndRemoveLabels(self):
bug = self.repo.get_label("Bug")
question = self.repo.get_label("Question")
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
self.issue.remove_from_labels(bug)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Project management", "Question"])
self.issue.remove_from_labels(question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Project management"])
self.issue.add_to_labels(bug, question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
def testAddAndRemoveLabelsWithStringArguments(self):
bug = "Bug"
question = "Question"
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
self.issue.remove_from_labels(bug)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Project management", "Question"])
self.issue.remove_from_labels(question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Project management"])
self.issue.add_to_labels(bug, question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
def testDeleteAndSetLabels(self):
bug = self.repo.get_label("Bug")
question = self.repo.get_label("Question")
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
self.issue.delete_labels()
self.assertListKeyEqual(self.issue.get_labels(), None, [])
self.issue.set_labels(bug, question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Question"])
def testDeleteAndSetLabelsWithStringArguments(self):
bug = "Bug"
question = "Question"
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
self.issue.delete_labels()
self.assertListKeyEqual(self.issue.get_labels(), None, [])
self.issue.set_labels(bug, question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Question"])
def testGetReactions(self):
reactions = self.issue.get_reactions()
self.assertEqual(reactions[0].content, "+1")
def testCreateReaction(self):
reaction = self.issue.create_reaction("hooray")
self.assertEqual(reaction.id, 16917472)
self.assertEqual(reaction.content, "hooray")
| gpl-3.0 | 3,432,881,002,582,383,600 | 55.123457 | 151 | 0.613946 | false |
tmpgit/intellij-community | python/lib/Lib/encodings/mbcs.py | 860 | 1211 | """ Python 'mbcs' Codec for Windows
Cloned by Mark Hammond ([email protected]) from ascii.py,
which was written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
# Import them explicitly to cause an ImportError
# on non-Windows systems
from codecs import mbcs_encode, mbcs_decode
# for IncrementalDecoder, IncrementalEncoder, ...
import codecs
### Codec APIs
encode = mbcs_encode
def decode(input, errors='strict'):
return mbcs_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return mbcs_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = mbcs_decode
class StreamWriter(codecs.StreamWriter):
encode = mbcs_encode
class StreamReader(codecs.StreamReader):
decode = mbcs_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mbcs',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| apache-2.0 | 2,533,366,814,072,854,500 | 24.765957 | 65 | 0.729149 | false |
xen0l/ansible | lib/ansible/modules/windows/win_shell.py | 28 | 4846 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_shell
short_description: Execute shell commands on target hosts
version_added: 2.2
description:
- The C(win_shell) module takes the command name followed by a list of space-delimited arguments.
It is similar to the M(win_command) module, but runs
the command via a shell (defaults to PowerShell) on the target host.
- For non-Windows targets, use the M(shell) module instead.
options:
free_form:
description:
- The C(win_shell) module takes a free form command to run.
- There is no parameter actually named 'free form'. See the examples!
required: yes
creates:
description:
- A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
type: path
removes:
description:
- A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
type: path
chdir:
description:
- Set the specified path as the current working directory before executing a command
type: path
executable:
description:
- Change the shell used to execute the command (eg, C(cmd)).
- The target shell must accept a C(/c) parameter followed by the raw command line to be executed.
type: path
stdin:
description:
- Set the stdin of the command directly to the specified value.
version_added: '2.5'
notes:
- If you want to run an executable securely and predictably, it may be
better to use the M(win_command) module instead. Best practices when writing
playbooks will follow the trend of using M(win_command) unless C(win_shell) is
explicitly required. When running ad-hoc commands, use your best judgement.
- WinRM will not return from a command execution until all child processes created have exited.
Thus, it is not possible to use C(win_shell) to spawn long-running child or background processes.
Consider creating a Windows service for managing background processes.
- For non-Windows targets, use the M(shell) module instead.
- See also M(win_command), M(raw)
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES = r'''
# Execute a command in the remote shell; stdout goes to the specified
# file on the remote.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt
# Change the working directory to somedir/ before executing the command.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt chdir=C:\somedir
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# somedir/somelog.txt doesn't exist.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt
args:
chdir: C:\somedir
creates: C:\somelog.txt
# Run a command under a non-Powershell interpreter (cmd in this case)
- win_shell: echo %HOMEDIR%
args:
executable: cmd
register: homedir_out
- name: run multi-lined shell commands
win_shell: |
$value = Test-Path -Path C:\temp
if ($value) {
Remove-Item -Path C:\temp -Force
}
New-Item -Path C:\temp -ItemType Directory
- name: retrieve the input based on stdin
win_shell: '$string = [Console]::In.ReadToEnd(); Write-Output $string.Trim()'
args:
stdin: Input message
'''
RETURN = r'''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
| gpl-3.0 | -990,400,696,005,544,700 | 32.652778 | 127 | 0.692117 | false |
draenog/gitolite-scripts | hooks/post-receive.d/gnome/git.py | 1 | 6496 | # Utility functions for git
#
# Copyright (C) 2008 Owen Taylor
# Copyright (C) 2009 Red Hat, Inc
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, If not, see
# http://www.gnu.org/licenses/.
#
# (These are adapted from git-bz)
import os
import re
from subprocess import Popen, PIPE
import sys
from util import die
# Clone of subprocess.CalledProcessError (not in Python 2.4)
class CalledProcessError(Exception):
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
NULL_REVISION = "0000000000000000000000000000000000000000"
# Run a git command
# Non-keyword arguments are passed verbatim as command line arguments
# Keyword arguments are turned into command line options
# <name>=True => --<name>
# <name>='<str>' => --<name>=<str>
# Special keyword arguments:
# _quiet: Discard all output even if an error occurs
# _interactive: Don't capture stdout and stderr
# _input=<str>: Feed <str> to stdinin of the command
# _outfile=<file): Use <file> as the output file descriptor
# _split_lines: Return an array with one string per returned line
#
def git_run(command, *args, **kwargs):
to_run = ['git', command.replace("_", "-")]
interactive = False
quiet = False
input = None
outfile = None
do_split_lines = False
for (k,v) in kwargs.items():
if k == '_quiet':
quiet = True
elif k == '_interactive':
interactive = True
elif k == '_input':
input = v
elif k == '_outfile':
outfile = v
elif k == '_split_lines':
do_split_lines = True
elif v is True:
if len(k) == 1:
to_run.append("-" + k)
else:
to_run.append("--" + k.replace("_", "-"))
else:
if len(k) == 1:
to_run.append("-" + k + v)
else:
to_run.append("--" + k.replace("_", "-") + "=" + v)
to_run.extend(args)
if outfile:
stdout = outfile
else:
if interactive:
stdout = None
else:
stdout = PIPE
if interactive:
stderr = None
else:
stderr = PIPE
if input != None:
stdin = PIPE
else:
stdin = None
process = Popen(to_run,
stdout=stdout, stderr=stderr, stdin=stdin)
output, error = process.communicate(input)
if process.returncode != 0:
if not quiet and not interactive:
print(error, end=' ', file=sys.stderr)
print(output, end=' ')
raise CalledProcessError(process.returncode, " ".join(to_run))
if interactive or outfile:
return None
else:
output = output.decode('utf8')
if do_split_lines:
return output.strip().splitlines()
else:
return output.strip()
# Wrapper to allow us to do git.<command>(...) instead of git_run()
class Git:
def __getattr__(self, command):
def f(*args, **kwargs):
return git_run(command, *args, **kwargs)
return f
git = Git()
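# Illustrative sketch (not part of the original hook): how git_run()'s keyword
# conventions translate into command lines.  Assumes it runs inside a git
# repository.
def _example_git_wrapper_usage():
    git_dir = git.rev_parse(git_dir=True)      # runs: git rev-parse --git-dir
    subject = git.log("-1", pretty="format:%s")  # runs: git log --pretty=format:%s -1
    recent = git.rev_list("HEAD", max_count="3", _split_lines=True)
    # runs: git rev-list --max-count=3 HEAD, returning one line per commit
    return git_dir, subject, recent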
class GitCommit:
def __init__(self, id, subject):
self.id = id
self.subject = subject
# Takes argument like 'git.rev_list()' and returns a list of commit objects
def rev_list_commits(*args, **kwargs):
kwargs_copy = dict(kwargs)
kwargs_copy['pretty'] = 'format:%s'
kwargs_copy['_split_lines'] = True
lines = git.rev_list(*args, **kwargs_copy)
if (len(lines) % 2 != 0):
        raise RuntimeError("git rev-list didn't return an even number of lines")
result = []
for i in range(0, len(lines), 2):
        m = re.match(r"commit\s+([A-Fa-f0-9]+)", lines[i])
if not m:
            raise RuntimeError("Can't parse commit id '%s'" % lines[i])
commit_id = m.group(1)
subject = lines[i + 1]
result.append(GitCommit(commit_id, subject))
return result
# Loads a single commit object by ID
def load_commit(commit_id):
return rev_list_commits(commit_id + "^!")[0]
# Return True if the commit has multiple parents
def commit_is_merge(commit):
if isinstance(commit, str):
commit = load_commit(commit)
parent_count = 0
for line in git.cat_file("commit", commit.id, _split_lines=True):
if line == "":
break
if line.startswith("parent "):
parent_count += 1
return parent_count > 1
# Return a short one-line summary of the commit
def commit_oneline(commit):
if isinstance(commit, str):
commit = load_commit(commit)
return commit.id[0:7]+"... " + commit.subject[0:59]
# Return the directory name with .git stripped as a short identifier
# for the module
def get_module_name():
try:
git_dir = git.rev_parse(git_dir=True, _quiet=True)
except CalledProcessError:
die("GIT_DIR not set")
# Use the directory name with .git stripped as a short identifier
absdir = os.path.abspath(git_dir)
if absdir.endswith(os.sep + '.git'):
absdir = os.path.dirname(absdir)
projectshort = os.path.basename(absdir)
if projectshort.endswith(".git"):
projectshort = projectshort[:-4]
return projectshort
# Return the project description or '' if it is 'Unnamed repository;'
def get_project_description():
try:
git_dir = git.rev_parse(git_dir=True, _quiet=True)
except CalledProcessError:
die("GIT_DIR not set")
projectdesc = ''
description = os.path.join(git_dir, 'description')
if os.path.exists(description):
try:
projectdesc = open(description).read().strip()
except:
pass
if projectdesc.startswith('Unnamed repository;'):
projectdesc = ''
return projectdesc
| gpl-2.0 | 7,213,760,666,948,043,000 | 29.213953 | 92 | 0.605757 | false |
shibaniahegde/OpenStak_swift | swift/common/middleware/domain_remap.py | 20 | 6262 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Domain Remap Middleware
Middleware that translates container and account parts of a domain to
path parameters that the proxy server understands.
container.account.storageurl/object gets translated to
container.account.storageurl/path_root/account/container/object
account.storageurl/path_root/container/object gets translated to
account.storageurl/path_root/account/container/object
Browsers can convert a host header to lowercase, so check that reseller
prefix on the account is the correct case. This is done by comparing the
items in the reseller_prefixes config option to the found prefix. If they
match except for case, the item from reseller_prefixes will be used
instead of the found reseller prefix. When none match, the default reseller
prefix is used. When no default reseller prefix is configured, any request with
an account prefix not in that list will be ignored by this middleware.
reseller_prefixes defaults to 'AUTH'.
Note that this middleware requires that container names and account names
(except as described above) must be DNS-compatible. This means that the
account name created in the system and the containers created by users
cannot exceed 63 characters or have UTF-8 characters. These are
restrictions over and above what swift requires and are not explicitly
checked. Simply put, this middleware will do a best-effort attempt to
derive account and container names from elements in the domain name and
put those derived values into the URL path (leaving the Host header
unchanged).
Also note that using container sync with remapped domain names is not
advised. With container sync, you should use the true storage end points as
sync destinations.
"""
from swift.common.swob import Request, HTTPBadRequest
from swift.common.utils import list_from_csv, register_swift_info
class DomainRemapMiddleware(object):
"""
Domain Remap Middleware
See above for a full description.
:param app: The next WSGI filter or app in the paste.deploy
chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf):
self.app = app
self.storage_domain = conf.get('storage_domain', 'example.com')
if self.storage_domain and self.storage_domain[0] != '.':
self.storage_domain = '.' + self.storage_domain
self.path_root = conf.get('path_root', 'v1').strip('/')
prefixes = conf.get('reseller_prefixes', 'AUTH')
self.reseller_prefixes = list_from_csv(prefixes)
self.reseller_prefixes_lower = [x.lower()
for x in self.reseller_prefixes]
self.default_reseller_prefix = conf.get('default_reseller_prefix')
def __call__(self, env, start_response):
if not self.storage_domain:
return self.app(env, start_response)
if 'HTTP_HOST' in env:
given_domain = env['HTTP_HOST']
else:
given_domain = env['SERVER_NAME']
port = ''
if ':' in given_domain:
given_domain, port = given_domain.rsplit(':', 1)
if given_domain.endswith(self.storage_domain):
parts_to_parse = given_domain[:-len(self.storage_domain)]
parts_to_parse = parts_to_parse.strip('.').split('.')
len_parts_to_parse = len(parts_to_parse)
if len_parts_to_parse == 2:
container, account = parts_to_parse
elif len_parts_to_parse == 1:
container, account = None, parts_to_parse[0]
else:
resp = HTTPBadRequest(request=Request(env),
body='Bad domain in host header',
content_type='text/plain')
return resp(env, start_response)
if len(self.reseller_prefixes) > 0:
if '_' not in account and '-' in account:
account = account.replace('-', '_', 1)
account_reseller_prefix = account.split('_', 1)[0].lower()
if account_reseller_prefix in self.reseller_prefixes_lower:
prefix_index = self.reseller_prefixes_lower.index(
account_reseller_prefix)
real_prefix = self.reseller_prefixes[prefix_index]
if not account.startswith(real_prefix):
account_suffix = account[len(real_prefix):]
account = real_prefix + account_suffix
elif self.default_reseller_prefix:
# account prefix is not in config list. Add default one.
account = "%s_%s" % (self.default_reseller_prefix, account)
else:
# account prefix is not in config list. bail.
return self.app(env, start_response)
path = env['PATH_INFO'].strip('/')
new_path_parts = ['', self.path_root, account]
if container:
new_path_parts.append(container)
if path.startswith(self.path_root):
path = path[len(self.path_root):].lstrip('/')
if path:
new_path_parts.append(path)
new_path = '/'.join(new_path_parts)
env['PATH_INFO'] = new_path
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info(
'domain_remap',
default_reseller_prefix=conf.get('default_reseller_prefix'))
def domain_filter(app):
return DomainRemapMiddleware(app, conf)
return domain_filter
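# Illustrative sketch (not part of the original middleware): a minimal
# paste.deploy section for proxy-server.conf wiring this filter in.  The
# values shown are example settings, not required defaults.
#
#   [filter:domain_remap]
#   use = egg:swift#domain_remap
#   storage_domain = example.com
#   path_root = v1
#   reseller_prefixes = AUTH, SERVICE
#   default_reseller_prefix = AUTH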
| apache-2.0 | 5,116,620,896,321,533,000 | 42.186207 | 79 | 0.644363 | false |
chafique-delli/OpenUpgrade | addons/l10n_ro/__openerp__.py | 40 | 1804 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 (<http://www.erpsystems.ro>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Romania - Accounting",
"version" : "1.0",
"author" : "TOTAL PC SYSTEMS",
"website": "http://www.erpsystems.ro",
"category" : "Localization/Account Charts",
"depends" : ['account','account_chart','base_vat'],
"description": """
This is the module to manage the accounting chart, VAT structure and Registration Number for Romania in OpenERP.
================================================================================================================
Romanian accounting chart and localization.
""",
"demo_xml" : [],
"data" : ['partner_view.xml','account_tax_code_template.xml','account_chart.xml','account_tax_template.xml','l10n_chart_ro_wizard.xml'],
"auto_install": False,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,259,686,583,989,112,000 | 44.1 | 140 | 0.586475 | false |
git-keeper/git-keeper | git-keeper-server/gkeepserver/log_polling.py | 1 | 9468 | # Copyright 2016 Nathan Sommer and Ben Coleman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Provides a class and a global access point for a log polling object.
The module stores a LogPollingThread instance in the module-level variable
named log_poller. Call initialize() on this object as early as possible to set
it up, and then call start() to start the thread.
Files to be watched can be added to the polling object. New events from the
log are passed on to a parser and then an appropriate handler.
It is possible to add files to be watched before calling initialize(), but no
actions can be taken until the thread is initialized and started.
The sizes of the log files are stored in the database after every log
modification. This allows the poller to start where it left off if the process
is restarted.
Example usage::
from gkeepcore.log_polling import log_poller
# import whatever byte_count_function and read_bytes_function you need
def main():
# set up other stuff
log_poller.initialize(new_log_event_queue, byte_count_function,
read_bytes_function, gkeepd_logger)
log_poller.start()
log_poller.watch_log_file('/path/to/log')
while keep_going:
log_file_path, log_event = new_log_event_queue.get()
# do something with the event
log_poller.shutdown()
"""
import json
import os
from queue import Queue, Empty
from threading import Thread
from time import time, sleep
from gkeepcore.gkeep_exception import GkeepException
from gkeepcore.log_file import LogFileReader, LogFileException
from gkeepcore.system_commands import file_is_readable
from gkeepserver.database import db
from gkeepserver.gkeepd_logger import GkeepdLoggerThread
class LogPollingThreadError(GkeepException):
"""Raised if there is an error polling log files."""
pass
class LogPollingThread(Thread):
"""
Watches log files for modifications.
New events from log files are put in a queue to be processed by another
thread.
See the module-level documentation for usage.
"""
def __init__(self):
"""
Constructor.
Create the _add_log_queue so that files to be watched can be added
before the thread is started. Initialize all other attributes to None.
Poller will be fully set up and ready to start after initialize() is
called.
"""
Thread.__init__(self)
# initialize this so we can add files to watch before the thread starts
self._add_log_queue = Queue()
self._new_log_event_queue = None
self._reader_class = None
self._polling_interval = None
self._last_poll_time = None
self._logger = None
self._log_file_readers = None
self._shutdown_flag = None
def initialize(self, new_log_event_queue: Queue, reader_class,
logger: GkeepdLoggerThread, polling_interval=0.5):
"""
Initialize the attributes.
:param new_log_event_queue: the poller places (file_path, event) pairs
into this queue
:param reader_class: LogFileReader class to use for creating readers
:param logger: the system logger, used to log runtime information
:param polling_interval: number of seconds between polling files
"""
self._new_log_event_queue = new_log_event_queue
self._reader_class = reader_class
self._polling_interval = polling_interval
self._last_poll_time = 0
self._logger = logger
# maps log file paths to log readers
self._log_file_readers = {}
self._load_paths_from_db()
self._shutdown_flag = False
def watch_log_file(self, file_path: str):
"""
Add a log file to be watched.
This method can be called from any other thread.
:param file_path: path to the log file
"""
if not file_is_readable(file_path):
error = '{} is not a readable file'.format(file_path)
raise GkeepException(error)
self._add_log_queue.put(file_path)
def shutdown(self):
"""
Shut down the poller.
The run loop will not shut down until the current polling cycle
is complete.
This method will block until the thread dies.
"""
self._shutdown_flag = True
self.join()
def run(self):
# Poll until _shutdown_flag is True.
#
# This should not be called directly, the thread should be started by
# calling start()
while not self._shutdown_flag:
try:
self._poll()
except Exception as e:
self._logger.log_error('Error polling logs: {0}'
.format(e))
def _load_paths_from_db(self):
for log_file_path, byte_count in db.get_byte_counts():
self._logger.log_debug('Watching {} from byte {}'
.format(log_file_path, byte_count))
self._create_and_add_reader(log_file_path,
seek_position=byte_count)
def _write_byte_counts_to_db(self):
# Writes all of the current file byte counts to the database.
# Called after updates.
byte_counts_by_file_path = {}
for file_path, reader in self._log_file_readers.items():
byte_counts_by_file_path[file_path] = reader.get_seek_position()
try:
db.update_byte_counts(byte_counts_by_file_path)
except GkeepException as e:
raise LogPollingThreadError('Error updating byte counts: {}'
.format(e))
def _write_byte_count_to_db(self, file_path):
# Writes a single file's byte count to the database.
update = {
file_path: self._log_file_readers[file_path].get_seek_position()
}
db.update_byte_counts(update)
def _start_watching_log_file(self, file_path: str):
# Start watching the file at file_path. This should only be called
# internally. Other threads should call watch_log_file()
try:
self._create_and_add_reader(file_path)
self._write_byte_count_to_db(file_path)
except GkeepException as e:
self._logger.log_warning(str(e))
def _create_and_add_reader(self, file_path: str, seek_position=None):
# Create a LogFileReader object for reading new data from the file
# and add it to the dictionary of readers.
# bail if the log does not exist
if not os.path.isfile(file_path):
warning = ('{0} does not exist and will not be watched'
.format(file_path))
self._logger.log_warning(warning)
return
reader = self._reader_class(file_path, seek_position=seek_position)
self._log_file_readers[file_path] = reader
def _stop_watching_log_file(self, log_file: LogFileReader):
# Simply remove the file reader from the dictionary
file_path = log_file.get_file_path()
del self._log_file_readers[file_path]
self._write_byte_count_to_db(file_path)
def _poll(self):
# Poll once for changes in files, and check the queue for new files
# to watch.
self._last_poll_time = time()
readers = list(self._log_file_readers.values())
# for each file reader, add any new events to the queue
for reader in readers:
try:
for event in reader.get_new_events():
file_path = reader.get_file_path()
self._new_log_event_queue.put((file_path, event))
self._write_byte_count_to_db(file_path)
except LogFileException as e:
self._logger.log_warning(str(e))
# if something goes wrong we should not keep watching this file
self._stop_watching_log_file(reader)
# consume all new log files until the queue is empty
try:
while True:
new_file_path = self._add_log_queue.get(block=False)
if isinstance(new_file_path, str):
self._start_watching_log_file(new_file_path)
else:
self._logger.log_warning('Log poller: {0} is not a string'
.format(new_file_path))
except Empty:
pass
# each file should be polled on average once per polling_interval
next_poll_time = self._last_poll_time + self._polling_interval
sleep_time = next_poll_time - time()
if sleep_time > 0:
sleep(sleep_time)
# module-level instance for global access
log_poller = LogPollingThread()
| agpl-3.0 | -3,600,288,135,336,967,000 | 32.104895 | 79 | 0.621567 | false |
ammaradil/fibonacci | Lib/site-packages/django/utils/module_loading.py | 145 | 6290 | import copy
import os
import sys
from importlib import import_module
from django.utils import six
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
module_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
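# A minimal usage sketch (an addition, not part of Django itself):
# import_string() resolves a dotted path to the object it names, so the
# following is equivalent to importing the class directly.
#
#   from django.utils.module_loading import import_string
#   ValidationError = import_string('django.core.exceptions.ValidationError')
#
# The dotted path above is only an example; any importable
# 'package.module.Attribute' string works, and ImportError is raised when
# either the module or the attribute cannot be found.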
def autodiscover_modules(*args, **kwargs):
"""
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
"""
from django.apps import apps
register_to = kwargs.get('register_to')
for app_config in apps.get_app_configs():
for module_to_search in args:
# Attempt to import the app's module.
try:
if register_to:
before_import_registry = copy.copy(register_to._registry)
import_module('%s.%s' % (app_config.name, module_to_search))
except Exception:
# Reset the registry to the state before the last import
# as this import will have to reoccur on the next request and
# this could raise NotRegistered and AlreadyRegistered
# exceptions (see #8245).
if register_to:
register_to._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have the module in question, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module_to_search):
raise
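# A minimal usage sketch (an addition, not part of Django itself): contrib
# apps typically call this from a ready() hook to import one submodule from
# every installed app, e.g.
#
#   from django.utils.module_loading import autodiscover_modules
#   autodiscover_modules('admin')                 # imports each <app>.admin
#   autodiscover_modules('tasks', register_to=registry)  # 'tasks' and 'registry' are hypothetical
#
# Apps that simply lack the named submodule are skipped silently; a real
# import error raised inside an existing submodule still propagates.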
if six.PY3:
from importlib.util import find_spec as importlib_find
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
try:
package_name = package.__name__
package_path = package.__path__
except AttributeError:
# package isn't a package.
return False
full_module_name = package_name + '.' + module_name
return importlib_find(full_module_name, package_path) is not None
else:
import imp
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
name = ".".join([package.__name__, module_name])
try:
# None indicates a cached miss; see mark_miss() in Python/import.c.
return sys.modules[name] is not None
except KeyError:
pass
try:
package_path = package.__path__ # No __path__, then not a package.
except AttributeError:
            # The remainder of this function assumes that we're dealing with
            # a package (module with a __path__), so if it's not, bail here.
return False
for finder in sys.meta_path:
if finder.find_module(name, package_path):
return True
for entry in package_path:
try:
# Try the cached finder.
finder = sys.path_importer_cache[entry]
if finder is None:
# Implicit import machinery should be used.
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
continue
# Else see if the finder knows of a loader.
elif finder.find_module(name):
return True
else:
continue
except KeyError:
# No cached finder, so try and make one.
for hook in sys.path_hooks:
try:
finder = hook(entry)
# XXX Could cache in sys.path_importer_cache
if finder.find_module(name):
return True
else:
# Once a finder is found, stop the search.
break
except ImportError:
# Continue the search for a finder.
continue
else:
# No finder found.
# Try the implicit import machinery if searching a directory.
if os.path.isdir(entry):
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
pass
# XXX Could insert None or NullImporter
else:
# Exhausted the search, so the module cannot be found.
return False
def module_dir(module):
"""
Find the name of the directory that contains a module, if possible.
Raise ValueError otherwise, e.g. for namespace packages that are split
over several directories.
"""
# Convert to list because _NamespacePath does not support indexing on 3.3.
paths = list(getattr(module, '__path__', []))
if len(paths) == 1:
return paths[0]
else:
filename = getattr(module, '__file__', None)
if filename is not None:
return os.path.dirname(filename)
raise ValueError("Cannot determine directory containing %s" % module)
| mit | -2,450,045,815,625,227,000 | 37.121212 | 82 | 0.539428 | false |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pip/_vendor/requests/utils.py | 319 | 24163 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import re
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL, InvalidHeader, FileModeWarning
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
total_length = 0
current_position = 0
if hasattr(o, '__len__'):
total_length = len(o)
elif hasattr(o, 'len'):
total_length = o.len
elif hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
total_length = len(o.getvalue())
elif hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if 'b' not in o.mode:
warnings.warn((
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."),
FileModeWarning
)
if hasattr(o, 'tell'):
try:
current_position = o.tell()
except (OSError, IOError):
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
current_position = total_length
return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc. This weird `if...encode`` dance is
# used for Python 3.2, which doesn't support unicode literals.
splitstr = b':'
if isinstance(url, str):
splitstr = splitstr.decode('ascii')
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
:rtype: list
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
:rtype: dict
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:rtype: CookieJar
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:rtype: str
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
:rtype: str
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""This function allows you to check if on IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""
Very simple check of the cidr format in no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
:rtype: bool
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = (
host for host in no_proxy.replace(' ', '').split(',') if host
)
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
elif ip == proxy_ip:
# If no_proxy ip was defined in plain IP notation instead of cidr notation &
# matches the IP of the index
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on macOS in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
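# Illustrative sketch (an addition, not part of requests): with a hypothetical
# environment value no_proxy='localhost,10.0.0.0/8,.internal.example.com',
# should_bypass_proxies('http://10.1.2.3/') returns True via the CIDR match
# and should_bypass_proxies('http://svc.internal.example.com/') returns True
# via the suffix match, while should_bypass_proxies('http://example.org/')
# would normally return False (unless the platform proxy_bypass() call says
# otherwise), letting get_environ_proxies() below return the system proxies.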
def get_environ_proxies(url):
"""
Return a dict of environment proxies.
:rtype: dict
"""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
:param url: The url being for the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get('all', proxies.get(urlparts.scheme))
proxy_keys = [
'all://' + urlparts.hostname,
'all',
urlparts.scheme + '://' + urlparts.hostname,
urlparts.scheme,
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy
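# Illustrative sketch (an addition, not part of requests): select_proxy()
# walks proxy_keys in order ('all://<host>', 'all', '<scheme>://<host>',
# '<scheme>') and returns the first entry present, so given the hypothetical
# mapping
#
#   proxies = {'http://example.com': 'http://proxy-for-example:3128',
#              'http': 'http://generic-http-proxy:3128'}
#
# select_proxy('http://example.com/path', proxies) returns the host-specific
# proxy, while select_proxy('http://other.host/', proxies) falls back to the
# plain 'http' entry.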
def default_user_agent(name="python-requests"):
"""
Return a string representing the default user agent.
:rtype: str
"""
return '%s/%s' % (name, __version__)
def default_headers():
"""
:rtype: requests.structures.CaseInsensitiveDict
"""
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
"""
:rtype: str
"""
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
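# Worked example (an addition, not part of requests): the two-character JSON
# prefix '{"' encoded as UTF-16-LE is b'{\x00"\x00', so nullcount == 2 and
# sample[1::2] == _null2 (the 2nd and 4th bytes are null), giving
# 'utf-16-le'; the same prefix encoded as UTF-8 contains no null bytes at
# all, so guess_json_utf() returns 'utf-8'.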
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password.
:rtype: (str,str)
"""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
def check_header_validity(header):
"""Verifies that header value is a string which doesn't contain
leading whitespace or return characters. This prevents unintended
header injection.
:param header: tuple, in the format (name, value).
"""
name, value = header
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise InvalidHeader("Header value %s must be of type str or bytes, "
"not %s" % (value, type(value)))
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
| agpl-3.0 | -1,493,731,281,731,799,600 | 28.575275 | 118 | 0.59856 | false |
Jordy281/Tic_Tac_Toe_SuperComputer | game.py | 1 | 1938 | import numpy as np
import copy
from random import randrange
"""
We will check across the diagonal top left to bottom right,
This will allow us to check all possible solutions for a win
"""
def threecheck(board):
win=False
#Top Left
if board[0]!=0:
#Row T-L to T-R
if board[0]==board[1]:
#Top Right
if board[2]==board[1]:
win=True
#Column T-L to B-L
if board[0]==board[3]:
if board[3]==board[6]:
win=True
#Middle center
if board[4]!=0:
#Diagonal T-L to B-R
if board[4]==board[0]:
if board[4]==board[8]:
win=True
#Diagonal B-L to T-R
if board[4]==board[2]:
if board[4] ==board[6]:
win=True
#Column T-M to B-M
if board[4]==board[1]:
if board[4] == board[7]:
win=True
#Row C-L to C-R
if board[4]==board[3]:
if board[4]==board[5]:
win=True
#Bottom Right
if board[8]!=0:
#Column T-R to B-R
if board[8]==board[2]:
#Top Right
if board[8]==board[5]:
win = True
#Row B-L to B-R
if board[8]==board[7]:
if board[8]==board[6]:
win=True
return win
"""
This will add the Move to the board
"""
def addMove(board, turn, index):
if turn%2==1:
board[index]=1
else:
board[index]=2
def gameOver(board, turn):
return threecheck(board) is True or turn==10
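"""
Minimal usage sketch (an addition, not part of the original module), assuming
the board is a flat list of nine cells indexed 0-8 left-to-right and
top-to-bottom, with 0 = empty, 1 = player one and 2 = player two:
    board = [0] * 9
    addMove(board, 1, 4) # turn 1 (odd), player one takes the center
    addMove(board, 2, 0) # turn 2 (even), player two takes the top-left
    print(threecheck(board)) # False - no three in a row yet
    print(gameOver(board, 3)) # False - not won and turn < 10
"""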
| mit | -272,497,288,709,249,920 | 23.225 | 60 | 0.395769 | false |
xme1226/sahara | sahara/tests/unit/db/migration/test_migrations.py | 2 | 13912 | # Copyright 2014 OpenStack Foundation
# Copyright 2014 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly.
There are also "opportunistic" tests for both mysql and postgresql in here,
which allows testing against mysql and pg in a properly configured unit
test environment.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
The test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands:
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;
"""
import os
from oslo.config import cfg
from oslo.db.sqlalchemy import utils as db_utils
from sahara.tests.unit.db.migration import test_migrations_base as base
CONF = cfg.CONF
class TestMigrations(base.BaseWalkMigrationTestCase, base.CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
USER = "openstack_citest"
PASSWD = "openstack_citest"
DATABASE = "openstack_citest"
def __init__(self, *args, **kwargs):
super(TestMigrations, self).__init__(*args, **kwargs)
def setUp(self):
super(TestMigrations, self).setUp()
def assertColumnExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertIn(column, t.c)
def assertColumnsExists(self, engine, table, columns):
for column in columns:
self.assertColumnExists(engine, table, column)
def assertColumnCount(self, engine, table, columns):
t = db_utils.get_table(engine, table)
self.assertEqual(len(t.columns), len(columns))
def assertColumnNotExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertNotIn(column, t.c)
def assertIndexExists(self, engine, table, index):
t = db_utils.get_table(engine, table)
index_names = [idx.name for idx in t.indexes]
self.assertIn(index, index_names)
def assertIndexMembers(self, engine, table, index, members):
self.assertIndexExists(engine, table, index)
t = db_utils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(members), sorted(index_columns))
def _pre_upgrade_001(self, engine):
# Anything returned from this method will be
# passed to corresponding _check_xxx method as 'data'.
pass
def _check_001(self, engine, data):
job_binary_internal_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'data',
'datasize'
]
self.assertColumnsExists(
engine, 'job_binary_internal', job_binary_internal_columns)
self.assertColumnCount(
engine, 'job_binary_internal', job_binary_internal_columns)
node_group_templates_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'tenant_id',
'flavor_id',
'image_id',
'plugin_name',
'hadoop_version',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'floating_ip_pool'
]
self.assertColumnsExists(
engine, 'node_group_templates', node_group_templates_columns)
self.assertColumnCount(
engine, 'node_group_templates', node_group_templates_columns)
data_sources_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'type',
'url',
'credentials'
]
self.assertColumnsExists(
engine, 'data_sources', data_sources_columns)
self.assertColumnCount(
engine, 'data_sources', data_sources_columns)
cluster_templates_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'cluster_configs',
'default_image_id',
'anti_affinity',
'tenant_id',
'neutron_management_network',
'plugin_name',
'hadoop_version'
]
self.assertColumnsExists(
engine, 'cluster_templates', cluster_templates_columns)
self.assertColumnCount(
engine, 'cluster_templates', cluster_templates_columns)
job_binaries_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'url',
'extra'
]
self.assertColumnsExists(
engine, 'job_binaries', job_binaries_columns)
self.assertColumnCount(
engine, 'job_binaries', job_binaries_columns)
jobs_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'type'
]
self.assertColumnsExists(engine, 'jobs', jobs_columns)
self.assertColumnCount(engine, 'jobs', jobs_columns)
templates_relations_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'flavor_id',
'image_id',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'count',
'cluster_template_id',
'node_group_template_id',
'floating_ip_pool'
]
self.assertColumnsExists(
engine, 'templates_relations', templates_relations_columns)
self.assertColumnCount(
engine, 'templates_relations', templates_relations_columns)
mains_association_columns = [
'Job_id',
'JobBinary_id'
]
self.assertColumnsExists(
engine, 'mains_association', mains_association_columns)
self.assertColumnCount(
engine, 'mains_association', mains_association_columns)
libs_association_columns = [
'Job_id',
'JobBinary_id'
]
self.assertColumnsExists(
engine, 'libs_association', libs_association_columns)
self.assertColumnCount(
engine, 'libs_association', libs_association_columns)
clusters_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'tenant_id',
'trust_id',
'is_transient',
'plugin_name',
'hadoop_version',
'cluster_configs',
'default_image_id',
'neutron_management_network',
'anti_affinity',
'management_private_key',
'management_public_key',
'user_keypair_id',
'status',
'status_description',
'info',
'extra',
'cluster_template_id'
]
self.assertColumnsExists(engine, 'clusters', clusters_columns)
self.assertColumnCount(engine, 'clusters', clusters_columns)
node_groups_columns = [
'created_at',
'updated_at',
'id',
'name',
'tenant_id',
'flavor_id',
'image_id',
'image_username',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'count',
'cluster_id',
'node_group_template_id',
'floating_ip_pool'
]
self.assertColumnsExists(engine, 'node_groups', node_groups_columns)
self.assertColumnCount(engine, 'node_groups', node_groups_columns)
job_executions_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'job_id',
'input_id',
'output_id',
'start_time',
'end_time',
'cluster_id',
'info',
'progress',
'oozie_job_id',
'return_code',
'job_configs',
'extra'
]
self.assertColumnsExists(
engine, 'job_executions', job_executions_columns)
self.assertColumnCount(
engine, 'job_executions', job_executions_columns)
instances_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'node_group_id',
'instance_id',
'instance_name',
'internal_ip',
'management_ip',
'volumes'
]
self.assertColumnsExists(engine, 'instances', instances_columns)
self.assertColumnCount(engine, 'instances', instances_columns)
self._data_001(engine, data)
def _data_001(self, engine, data):
datasize = 512 * 1024 # 512kB
data = os.urandom(datasize)
t = db_utils.get_table(engine, 'job_binary_internal')
engine.execute(t.insert(), data=data, id='123', name='name')
new_data = engine.execute(t.select()).fetchone().data
self.assertEqual(data, new_data)
engine.execute(t.delete())
def _check_002(self, engine, data):
# currently, 002 is just a placeholder
pass
def _check_003(self, engine, data):
# currently, 003 is just a placeholder
pass
def _check_004(self, engine, data):
# currently, 004 is just a placeholder
pass
def _check_005(self, engine, data):
# currently, 005 is just a placeholder
pass
def _check_006(self, engine, data):
# currently, 006 is just a placeholder
pass
def _pre_upgrade_007(self, engine):
desc = 'magic'
t = db_utils.get_table(engine, 'clusters')
engine.execute(t.insert(), id='123', name='name', plugin_name='pname',
hadoop_version='1', management_private_key='2',
management_public_key='3', status_description=desc)
def _check_007(self, engine, data):
t = db_utils.get_table(engine, 'clusters')
res = engine.execute(t.select(), id='123').first()
self.assertEqual('magic', res['status_description'])
engine.execute(t.delete())
# check that status_description can keep 128kb.
# MySQL varchar can not keep more then 64kb
desc = 'a' * 128 * 1024 # 128kb
t = db_utils.get_table(engine, 'clusters')
engine.execute(t.insert(), id='123', name='name', plugin_name='plname',
hadoop_version='hversion', management_private_key='1',
management_public_key='2', status_description=desc)
new_desc = engine.execute(t.select()).fetchone().status_description
self.assertEqual(desc, new_desc)
engine.execute(t.delete())
def _check_008(self, engine, date):
self.assertColumnExists(engine, 'node_group_templates',
'security_groups')
self.assertColumnExists(engine, 'node_groups', 'security_groups')
self.assertColumnExists(engine, 'templates_relations',
'security_groups')
def _check_009(self, engine, date):
self.assertColumnExists(engine, 'clusters', 'rollback_info')
def _check_010(self, engine, date):
self.assertColumnExists(engine, 'node_group_templates',
'auto_security_group')
self.assertColumnExists(engine, 'node_groups', 'auto_security_group')
self.assertColumnExists(engine, 'templates_relations',
'auto_security_group')
self.assertColumnExists(engine, 'node_groups', 'open_ports')
def _check_011(self, engine, date):
self.assertColumnExists(engine, 'clusters', 'sahara_info')
def _check_012(self, engine, date):
self.assertColumnExists(engine, 'node_group_templates',
'availability_zone')
self.assertColumnExists(engine, 'node_groups', 'availability_zone')
self.assertColumnExists(engine, 'templates_relations',
'availability_zone')
def _check_014(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates', 'volume_type')
self.assertColumnExists(engine, 'node_groups', 'volume_type')
self.assertColumnExists(engine, 'templates_relations', 'volume_type')
| apache-2.0 | 281,928,494,503,809,540 | 32.603865 | 79 | 0.565771 | false |
atosorigin/ansible | test/support/windows-integration/plugins/modules/win_lineinfile.py | 68 | 7333 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_lineinfile
short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in a file only.
version_added: "2.0"
options:
path:
description:
- The path of the file to modify.
- Note that the Windows path delimiter C(\) must be escaped as C(\\) when the line is double quoted.
- Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
type: path
required: yes
aliases: [ dest, destfile, name ]
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
regex:
description:
- The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found
will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions;
see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx).
aliases: [ "regexp" ]
state:
description:
- Whether the line should be there or not.
type: str
choices: [ absent, present ]
default: present
line:
description:
- Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
- Be aware that the line is processed first on the controller and thus is dependent on yaml quoting rules. Any double quoted line
will have control characters, such as '\r\n', expanded. To print such characters literally, use single or no quotes.
type: str
backrefs:
description:
- Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
- If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter.
type: bool
default: no
insertafter:
description:
- Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
- If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
type: str
choices: [ EOF, '*regex*' ]
default: EOF
insertbefore:
description:
- Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available;
C(BOF) for inserting the line at the beginning of the file.
- If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
type: str
choices: [ BOF, '*regex*' ]
create:
description:
- Used with C(state=present). If specified, the file will be created if it does not already exist. By default it will fail if the file is missing.
type: bool
default: no
validate:
description:
- Validation to run before copying into place. Use %s in the command to indicate the current file to validate.
- The command is passed securely so shell features like expansion and pipes won't work.
type: str
encoding:
description:
- Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause
the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding.
- An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method -
see U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx).
- This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a
specific encoding, the default encoding (UTF-8, no BOM) will be used.
type: str
default: auto
newline:
description:
- Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated
line separator will be used for file output regardless of the original line separator that appears in the input file.
type: str
choices: [ unix, windows ]
default: windows
notes:
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
seealso:
- module: assemble
- module: lineinfile
author:
- Brian Lloyd (@brianlloyd)
'''
EXAMPLES = r'''
# Before Ansible 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- name: Insert path without converting \r\n
win_lineinfile:
path: c:\file.txt
line: c:\return\new
- win_lineinfile:
path: C:\Temp\example.conf
regex: '^name='
line: 'name=JohnDoe'
- win_lineinfile:
path: C:\Temp\example.conf
regex: '^name='
state: absent
- win_lineinfile:
path: C:\Temp\example.conf
regex: '^127\.0\.0\.1'
line: '127.0.0.1 localhost'
- win_lineinfile:
path: C:\Temp\httpd.conf
regex: '^Listen '
insertafter: '^#Listen '
line: Listen 8080
- win_lineinfile:
path: C:\Temp\services
regex: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
- name: Create file if it doesn't exist with a specific encoding
win_lineinfile:
path: C:\Temp\utf16.txt
create: yes
encoding: utf-16
line: This is a utf-16 encoded file
- name: Add a line to a file and ensure the resulting file uses unix line separators
win_lineinfile:
path: C:\Temp\testfile.txt
line: Line added to file
newline: unix
- name: Update a line using backrefs
win_lineinfile:
path: C:\Temp\example.conf
backrefs: yes
regex: '(^name=)'
line: '$1JohnDoe'
'''
RETURN = r'''
backup:
description:
- Name of the backup file that was created.
- This is now deprecated, use C(backup_file) instead.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
'''
| gpl-3.0 | -4,842,358,973,892,712,000 | 39.738889 | 152 | 0.691668 | false |
caser789/xuejiao-blog | app/api_1_0/posts.py | 1 | 1747 | from flask import jsonify, request, g, abort, url_for, current_app
from .. import db
from ..models import Post, Permission
from . import api
from .decorators import permission_required
from .errors import forbidden
@api.route('/posts/')
def get_posts():
page = request.args.get('page', 1, type=int)
pagination = Post.query.paginate(
page, per_page=current_app.config['BLOG_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_posts', page=page-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_posts', page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
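# Example response shape (an addition for illustration, not part of the
# original module): a GET to /posts/?page=2 would produce JSON along the
# lines of
#
#   {"posts": [...serialized posts for page 2...],
#    "prev": "http://<host>/<api prefix>/posts/?page=1",
#    "next": "http://<host>/<api prefix>/posts/?page=3",
#    "count": 57}
#
# with "prev"/"next" set to null when there is no previous or next page; the
# host, API prefix and count shown are hypothetical.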
@api.route('/posts/<int:id>')
def get_post(id):
post = Post.query.get_or_404(id)
return jsonify(post.to_json())
@api.route('/posts/', methods=['POST'])
@permission_required(Permission.WRITE_ARTICLES)
def new_post():
post = Post.from_json(request.json)
post.author = g.current_user
db.session.add(post)
db.session.commit()
return jsonify(post.to_json()), 201, \
{'Location': url_for('api.get_post', id=post.id, _external=True)}
@api.route('/posts/<int:id>', methods=['PUT'])
@permission_required(Permission.WRITE_ARTICLES)
def edit_post(id):
post = Post.query.get_or_404(id)
if g.current_user != post.author and \
not g.current_user.can(Permission.ADMINISTER):
return forbidden('Insufficient permissions')
post.body = request.json.get('body', post.body)
db.session.add(post)
return jsonify(post.to_json())
| mit | 1,953,097,622,053,751,600 | 32.596154 | 77 | 0.643389 | false |
wbyne/QGIS | python/plugins/processing/algs/qgis/HypsometricCurves.py | 2 | 8209 | # -*- coding: utf-8 -*-
"""
***************************************************************************
HypsometricCurves.py
---------------------
Date : November 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'November 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import numpy
from osgeo import gdal, ogr, osr
from qgis.core import QgsRectangle, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputDirectory
from processing.tools import raster, vector, dataobjects
class HypsometricCurves(GeoAlgorithm):
INPUT_DEM = 'INPUT_DEM'
BOUNDARY_LAYER = 'BOUNDARY_LAYER'
STEP = 'STEP'
USE_PERCENTAGE = 'USE_PERCENTAGE'
OUTPUT_DIRECTORY = 'OUTPUT_DIRECTORY'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Hypsometric curves')
self.group, self.i18n_group = self.trAlgorithm('Raster tools')
self.addParameter(ParameterRaster(self.INPUT_DEM,
self.tr('DEM to analyze')))
self.addParameter(ParameterVector(self.BOUNDARY_LAYER,
self.tr('Boundary layer'), dataobjects.TYPE_VECTOR_POLYGON))
self.addParameter(ParameterNumber(self.STEP,
self.tr('Step'), 0.0, 999999999.999999, 100.0))
self.addParameter(ParameterBoolean(self.USE_PERCENTAGE,
self.tr('Use % of area instead of absolute value'), False))
self.addOutput(OutputDirectory(self.OUTPUT_DIRECTORY,
self.tr('Hypsometric curves')))
def processAlgorithm(self, progress):
rasterPath = self.getParameterValue(self.INPUT_DEM)
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.BOUNDARY_LAYER))
step = self.getParameterValue(self.STEP)
percentage = self.getParameterValue(self.USE_PERCENTAGE)
outputPath = self.getOutputValue(self.OUTPUT_DIRECTORY)
rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
geoTransform = rasterDS.GetGeoTransform()
rasterBand = rasterDS.GetRasterBand(1)
noData = rasterBand.GetNoDataValue()
cellXSize = abs(geoTransform[1])
cellYSize = abs(geoTransform[5])
rasterXSize = rasterDS.RasterXSize
rasterYSize = rasterDS.RasterYSize
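        # Full raster extent in map coordinates; each boundary polygon is clipped against it below.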
rasterBBox = QgsRectangle(geoTransform[0], geoTransform[3] - cellYSize
* rasterYSize, geoTransform[0] + cellXSize
* rasterXSize, geoTransform[3])
rasterGeom = QgsGeometry.fromRect(rasterBBox)
crs = osr.SpatialReference()
crs.ImportFromProj4(str(layer.crs().toProj4()))
memVectorDriver = ogr.GetDriverByName('Memory')
memRasterDriver = gdal.GetDriverByName('MEM')
features = vector.features(layer)
total = 100.0 / len(features)
for current, f in enumerate(features):
geom = f.geometry()
intersectedGeom = rasterGeom.intersection(geom)
if intersectedGeom.isGeosEmpty():
progress.setInfo(
self.tr('Feature %d does not intersect raster or '
                            'is entirely located in NODATA area' % f.id()))
continue
fName = os.path.join(
outputPath, 'hystogram_%s_%s.csv' % (layer.name(), f.id()))
ogrGeom = ogr.CreateGeometryFromWkt(intersectedGeom.exportToWkt())
bbox = intersectedGeom.boundingBox()
xMin = bbox.xMinimum()
xMax = bbox.xMaximum()
yMin = bbox.yMinimum()
yMax = bbox.yMaximum()
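            # Convert the intersection's bounding box into a pixel window (column/row offset plus width/height) for reading the DEM.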
(startColumn, startRow) = raster.mapToPixel(xMin, yMax, geoTransform)
(endColumn, endRow) = raster.mapToPixel(xMax, yMin, geoTransform)
width = endColumn - startColumn
height = endRow - startRow
srcOffset = (startColumn, startRow, width, height)
srcArray = rasterBand.ReadAsArray(*srcOffset)
if srcOffset[2] == 0 or srcOffset[3] == 0:
progress.setInfo(
self.tr('Feature %d is smaller than raster '
'cell size' % f.id()))
continue
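            # Geotransform for the clipped window: same cell sizes, origin shifted to the window's upper-left corner.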
newGeoTransform = (
geoTransform[0] + srcOffset[0] * geoTransform[1],
geoTransform[1],
0.0,
geoTransform[3] + srcOffset[1] * geoTransform[5],
0.0,
geoTransform[5]
)
memVDS = memVectorDriver.CreateDataSource('out')
memLayer = memVDS.CreateLayer('poly', crs, ogr.wkbPolygon)
ft = ogr.Feature(memLayer.GetLayerDefn())
ft.SetGeometry(ogrGeom)
memLayer.CreateFeature(ft)
ft.Destroy()
rasterizedDS = memRasterDriver.Create('', srcOffset[2],
srcOffset[3], 1, gdal.GDT_Byte)
rasterizedDS.SetGeoTransform(newGeoTransform)
gdal.RasterizeLayer(rasterizedDS, [1], memLayer, burn_values=[1])
rasterizedArray = rasterizedDS.ReadAsArray()
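            # Keep only cells that fall inside the rasterized polygon and are not NODATA.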
srcArray = numpy.nan_to_num(srcArray)
masked = numpy.ma.MaskedArray(srcArray,
mask=numpy.logical_or(srcArray == noData,
numpy.logical_not(rasterizedArray)))
self.calculateHypsometry(f.id(), fName, progress, masked,
cellXSize, cellYSize, percentage, step)
memVDS = None
rasterizedDS = None
progress.setPercentage(int(current * total))
rasterDS = None
def calculateHypsometry(self, fid, fName, progress, data, pX, pY,
percentage, step):
out = dict()
d = data.compressed()
if d.size == 0:
progress.setInfo(
self.tr('Feature %d does not intersect raster or '
                        'is entirely located in NODATA area' % fid))
return
minValue = d.min()
maxValue = d.max()
startValue = minValue
tmpValue = minValue + step
while startValue < maxValue:
out[tmpValue] = ((startValue <= d) & (d < tmpValue)).sum()
startValue = tmpValue
tmpValue += step
if percentage:
multiplier = 100.0 / len(d.flat)
else:
multiplier = pX * pY
for k, v in out.items():
out[k] = v * multiplier
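        # Accumulate per-bin areas into a cumulative curve, starting from the lowest elevation bin.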
prev = None
for i in sorted(out.items()):
if prev is None:
out[i[0]] = i[1]
else:
out[i[0]] = i[1] + out[prev]
prev = i[0]
writer = vector.TableWriter(fName, 'utf-8', [self.tr('Area'), self.tr('Elevation')])
for i in sorted(out.items()):
writer.addRecord([i[1], i[0]])
del writer
| gpl-2.0 | 5,283,932,299,736,867,000 | 37.359813 | 102 | 0.542332 | false |
docker/docker-py | tests/integration/api_swarm_test.py | 4 | 9438 | import copy
import docker
import pytest
from ..helpers import force_leave_swarm, requires_api_version
from .base import BaseAPIIntegrationTest
class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
super(SwarmTest, self).setUp()
force_leave_swarm(self.client)
self._unlock_key = None
def tearDown(self):
try:
if self._unlock_key:
self.client.unlock_swarm(self._unlock_key)
except docker.errors.APIError:
pass
force_leave_swarm(self.client)
super(SwarmTest, self).tearDown()
@requires_api_version('1.24')
def test_init_swarm_simple(self):
assert self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_force_new_cluster(self):
pytest.skip('Test stalls the engine on 1.12.0')
assert self.init_swarm()
version_1 = self.client.inspect_swarm()['Version']['Index']
assert self.client.init_swarm(force_new_cluster=True)
version_2 = self.client.inspect_swarm()['Version']['Index']
assert version_2 != version_1
@requires_api_version('1.39')
def test_init_swarm_custom_addr_pool_defaults(self):
assert self.init_swarm()
results = self.client.inspect_swarm()
assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
assert results['SubnetSize'] == 24
@requires_api_version('1.39')
def test_init_swarm_custom_addr_pool_only_pool(self):
assert self.init_swarm(default_addr_pool=['2.0.0.0/16'])
results = self.client.inspect_swarm()
assert set(results['DefaultAddrPool']) == {'2.0.0.0/16'}
assert results['SubnetSize'] == 24
@requires_api_version('1.39')
def test_init_swarm_custom_addr_pool_only_subnet_size(self):
assert self.init_swarm(subnet_size=26)
results = self.client.inspect_swarm()
assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
assert results['SubnetSize'] == 26
@requires_api_version('1.39')
def test_init_swarm_custom_addr_pool_both_args(self):
assert self.init_swarm(default_addr_pool=['2.0.0.0/16', '3.0.0.0/16'],
subnet_size=28)
results = self.client.inspect_swarm()
assert set(results['DefaultAddrPool']) == {'2.0.0.0/16', '3.0.0.0/16'}
assert results['SubnetSize'] == 28
@requires_api_version('1.24')
def test_init_already_in_cluster(self):
assert self.init_swarm()
with pytest.raises(docker.errors.APIError):
self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_custom_raft_spec(self):
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
@requires_api_version('1.30')
def test_init_swarm_with_ca_config(self):
spec = self.client.create_swarm_spec(
node_cert_expiry=7776000000000000, ca_force_rotate=6000000000000
)
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
assert swarm_info['Spec']['CAConfig']['NodeCertExpiry'] == (
spec['CAConfig']['NodeCertExpiry']
)
assert swarm_info['Spec']['CAConfig']['ForceRotate'] == (
spec['CAConfig']['ForceRotate']
)
@requires_api_version('1.25')
def test_init_swarm_with_autolock_managers(self):
spec = self.client.create_swarm_spec(autolock_managers=True)
assert self.init_swarm(swarm_spec=spec)
# save unlock key for tearDown
self._unlock_key = self.client.get_unlock_key()
swarm_info = self.client.inspect_swarm()
assert (
swarm_info['Spec']['EncryptionConfig']['AutoLockManagers'] is True
)
assert self._unlock_key.get('UnlockKey')
@requires_api_version('1.25')
@pytest.mark.xfail(
reason="This doesn't seem to be taken into account by the engine"
)
def test_init_swarm_with_log_driver(self):
spec = {'TaskDefaults': {'LogDriver': {'Name': 'syslog'}}}
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
assert swarm_info['Spec']['TaskDefaults']['LogDriver']['Name'] == (
'syslog'
)
@requires_api_version('1.24')
def test_leave_swarm(self):
assert self.init_swarm()
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.leave_swarm()
        assert exc_info.value.response.status_code == 500
assert self.client.leave_swarm(force=True)
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.inspect_swarm()
        assert exc_info.value.response.status_code == 406
assert self.client.leave_swarm(force=True)
@requires_api_version('1.24')
def test_update_swarm(self):
assert self.init_swarm()
swarm_info_1 = self.client.inspect_swarm()
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200,
node_cert_expiry=7776000000000000
)
assert self.client.update_swarm(
version=swarm_info_1['Version']['Index'],
swarm_spec=spec, rotate_worker_token=True
)
swarm_info_2 = self.client.inspect_swarm()
assert (
swarm_info_1['Version']['Index'] !=
swarm_info_2['Version']['Index']
)
assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000
assert (
swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
)
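        # rotate_worker_token=True should rotate only the worker join token; the manager token stays the same.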
assert (
swarm_info_1['JoinTokens']['Manager'] ==
swarm_info_2['JoinTokens']['Manager']
)
assert (
swarm_info_1['JoinTokens']['Worker'] !=
swarm_info_2['JoinTokens']['Worker']
)
@requires_api_version('1.24')
def test_list_nodes(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
assert 'ID' in node
assert 'Spec' in node
assert node['Spec']['Role'] == 'manager'
filtered_list = self.client.nodes(filters={
'id': node['ID']
})
assert len(filtered_list) == 1
filtered_list = self.client.nodes(filters={
'role': 'worker'
})
assert len(filtered_list) == 0
@requires_api_version('1.24')
def test_inspect_node(self):
node_id = self.init_swarm()
assert node_id
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
node_data = self.client.inspect_node(node['ID'])
assert node['ID'] == node_data['ID']
assert node_id == node['ID']
assert node['Version'] == node_data['Version']
@requires_api_version('1.24')
def test_update_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
node = nodes_list[0]
orig_spec = node['Spec']
# add a new label
new_spec = copy.deepcopy(orig_spec)
new_spec['Labels'] = {'new.label': 'new value'}
self.client.update_node(node_id=node['ID'],
version=node['Version']['Index'],
node_spec=new_spec)
updated_node = self.client.inspect_node(node['ID'])
assert new_spec == updated_node['Spec']
# Revert the changes
self.client.update_node(node_id=node['ID'],
version=updated_node['Version']['Index'],
node_spec=orig_spec)
reverted_node = self.client.inspect_node(node['ID'])
assert orig_spec == reverted_node['Spec']
@requires_api_version('1.24')
def test_remove_main_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
node_id = nodes_list[0]['ID']
with pytest.raises(docker.errors.NotFound):
self.client.remove_node('foobar01')
with pytest.raises(docker.errors.APIError) as e:
self.client.remove_node(node_id)
assert e.value.response.status_code >= 400
with pytest.raises(docker.errors.APIError) as e:
self.client.remove_node(node_id, True)
assert e.value.response.status_code >= 400
@requires_api_version('1.25')
def test_rotate_manager_unlock_key(self):
spec = self.client.create_swarm_spec(autolock_managers=True)
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
key_1 = self.client.get_unlock_key()
assert self.client.update_swarm(
version=swarm_info['Version']['Index'],
rotate_manager_unlock_key=True
)
key_2 = self.client.get_unlock_key()
assert key_1['UnlockKey'] != key_2['UnlockKey']
@requires_api_version('1.30')
@pytest.mark.xfail(reason='Can fail if eth0 has multiple IP addresses')
def test_init_swarm_data_path_addr(self):
assert self.init_swarm(data_path_addr='eth0')
| apache-2.0 | -8,196,727,219,823,829,000 | 36.011765 | 79 | 0.593452 | false |
sstruct/flasky | tests/test_api.py | 23 | 10686 | import unittest
import json
import re
from base64 import b64encode
from flask import url_for
from app import create_app, db
from app.models import User, Role, Post, Comment
class APITestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def get_api_headers(self, username, password):
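        # Build HTTP Basic auth headers; the username slot may also carry an auth token with an empty password (see test_token_auth).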
return {
'Authorization': 'Basic ' + b64encode(
(username + ':' + password).encode('utf-8')).decode('utf-8'),
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def test_404(self):
response = self.client.get(
'/wrong/url',
headers=self.get_api_headers('email', 'password'))
self.assertTrue(response.status_code == 404)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['error'] == 'not found')
def test_no_auth(self):
response = self.client.get(url_for('api.get_posts'),
content_type='application/json')
self.assertTrue(response.status_code == 200)
def test_bad_auth(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
# authenticate with bad password
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 401)
def test_token_auth(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
# issue a request with a bad token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('bad-token', ''))
self.assertTrue(response.status_code == 401)
# get a token
response = self.client.get(
url_for('api.get_token'),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('token'))
token = json_response['token']
# issue a request with the token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers(token, ''))
self.assertTrue(response.status_code == 200)
def test_anonymous(self):
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('', ''))
self.assertTrue(response.status_code == 200)
def test_unconfirmed_account(self):
# add an unconfirmed user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=False,
role=r)
db.session.add(u)
db.session.commit()
# get list of posts with the unconfirmed account
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 403)
def test_posts(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
# write an empty post
response = self.client.post(
url_for('api.new_post'),
headers=self.get_api_headers('[email protected]', 'cat'),
data=json.dumps({'body': ''}))
self.assertTrue(response.status_code == 400)
# write a post
response = self.client.post(
url_for('api.new_post'),
headers=self.get_api_headers('[email protected]', 'cat'),
data=json.dumps({'body': 'body of the *blog* post'}))
self.assertTrue(response.status_code == 201)
url = response.headers.get('Location')
self.assertIsNotNone(url)
# get the new post
response = self.client.get(
url,
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] == 'body of the *blog* post')
self.assertTrue(json_response['body_html'] ==
'<p>body of the <em>blog</em> post</p>')
json_post = json_response
# get the post from the user
response = self.client.get(
url_for('api.get_user_posts', id=u.id),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 1)
self.assertTrue(json_response['posts'][0] == json_post)
# get the post from the user as a follower
response = self.client.get(
url_for('api.get_user_followed_posts', id=u.id),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 1)
self.assertTrue(json_response['posts'][0] == json_post)
# edit post
response = self.client.put(
url,
headers=self.get_api_headers('[email protected]', 'cat'),
data=json.dumps({'body': 'updated body'}))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] == 'updated body')
self.assertTrue(json_response['body_html'] == '<p>updated body</p>')
def test_users(self):
# add two users
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u1 = User(email='[email protected]', username='john',
password='cat', confirmed=True, role=r)
u2 = User(email='[email protected]', username='susan',
password='dog', confirmed=True, role=r)
db.session.add_all([u1, u2])
db.session.commit()
# get users
response = self.client.get(
url_for('api.get_user', id=u1.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'john')
response = self.client.get(
url_for('api.get_user', id=u2.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'susan')
def test_comments(self):
# add two users
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u1 = User(email='[email protected]', username='john',
password='cat', confirmed=True, role=r)
u2 = User(email='[email protected]', username='susan',
password='dog', confirmed=True, role=r)
db.session.add_all([u1, u2])
db.session.commit()
# add a post
post = Post(body='body of the post', author=u1)
db.session.add(post)
db.session.commit()
# write a comment
response = self.client.post(
url_for('api.new_post_comment', id=post.id),
headers=self.get_api_headers('[email protected]', 'dog'),
data=json.dumps({'body': 'Good [post](http://example.com)!'}))
self.assertTrue(response.status_code == 201)
json_response = json.loads(response.data.decode('utf-8'))
url = response.headers.get('Location')
self.assertIsNotNone(url)
self.assertTrue(json_response['body'] ==
'Good [post](http://example.com)!')
self.assertTrue(
re.sub('<.*?>', '', json_response['body_html']) == 'Good post!')
# get the new comment
response = self.client.get(
url,
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] ==
'Good [post](http://example.com)!')
# add another comment
comment = Comment(body='Thank you!', author=u1, post=post)
db.session.add(comment)
db.session.commit()
# get the two comments from the post
response = self.client.get(
url_for('api.get_post_comments', id=post.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 2)
# get all the comments
response = self.client.get(
url_for('api.get_comments', id=post.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 2)
| mit | 8,774,952,314,524,759,000 | 39.324528 | 77 | 0.580011 | false |
codingforfun/Olena-Mirror | swilena/python/box2d-misc.py | 2 | 1329 | #! /usr/bin/env python
# Copyright (C) 2009 EPITA Research and Development Laboratory (LRDE)
#
# This file is part of Olena.
#
# Olena is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, version 2 of the License.
#
# Olena is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Olena. If not, see <http://www.gnu.org/licenses/>.
from swilena import *
# Generic iterator interface.
b = box2d(2, 3)
p = iter(b)
while p.is_valid():
print p.site()
p.advance()
print
# Python's iterator interface.
# We cannot use
#
# for p in box2d(2, 3):
# print p
#
# here because the box2d is a temporary object that may be collected
# before the end of the iteration. To prevent Python from disposing
# of it, we use a named variable that will keep it alive until the loop ends.
#
# Another possibility would be to have a generator playing with the
# `thisown' field of the box, to prevent its destruction (see
# http://www.swig.org/Doc1.3/SWIGDocumentation.html#Python_nn30).
for p in b:
print p
| gpl-2.0 | 3,881,340,963,983,665,700 | 27.276596 | 72 | 0.732129 | false |
Beauhurst/django | django/contrib/admin/migrations/0001_initial.py | 95 | 1893 | import django.contrib.admin.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '__first__'),
]
operations = [
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('action_time', models.DateTimeField(auto_now=True, verbose_name='action time')),
('object_id', models.TextField(null=True, verbose_name='object id', blank=True)),
('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
('change_message', models.TextField(verbose_name='change message', blank=True)),
('content_type', models.ForeignKey(
to_field='id',
on_delete=models.SET_NULL,
blank=True, null=True,
to='contenttypes.ContentType',
verbose_name='content type',
)),
('user', models.ForeignKey(
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name='user',
)),
],
options={
'ordering': ('-action_time',),
'db_table': 'django_admin_log',
'verbose_name': 'log entry',
'verbose_name_plural': 'log entries',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.admin.models.LogEntryManager()),
],
),
]
| bsd-3-clause | -2,820,221,301,498,797,000 | 39.276596 | 114 | 0.522451 | false |
vadimtk/chrome4sdp | tools/telemetry/telemetry/internal/platform/power_monitor/msr_power_monitor_unittest.py | 24 | 1071 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import unittest
from telemetry import decorators
from telemetry.internal.platform.power_monitor import msr_power_monitor
from telemetry.internal.platform import win_platform_backend
class MsrPowerMonitorTest(unittest.TestCase):
@decorators.Enabled('xp', 'win7', 'win8') # http://crbug.com/479337
def testMsrRuns(self):
platform_backend = win_platform_backend.WinPlatformBackend()
power_monitor = msr_power_monitor.MsrPowerMonitorWin(platform_backend)
if not power_monitor.CanMonitorPower():
logging.warning('Test not supported on this platform.')
return
power_monitor.StartMonitoringPower(None)
time.sleep(0.01)
statistics = power_monitor.StopMonitoringPower()
self.assertEqual(statistics['identifier'], 'msr')
self.assertIn('energy_consumption_mwh', statistics)
self.assertGreater(statistics['energy_consumption_mwh'], 0)
| bsd-3-clause | 958,169,318,238,568,700 | 35.931034 | 74 | 0.764706 | false |
alexallah/django | django/forms/widgets.py | 1 | 35356 | """
HTML Widget classes
"""
import copy
import datetime
import re
from itertools import chain
from django.conf import settings
from django.forms.utils import to_current_timezone
from django.templatetags.static import static
from django.utils import datetime_safe, formats
from django.utils.dates import MONTHS
from django.utils.formats import get_format
from django.utils.html import format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from .renderers import get_default_renderer
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
@html_safe
class Media:
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name))
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES)))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css.keys())
return chain.from_iterable([
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet" />',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media)
def absolute_path(self, path):
"""
Given a relative or absolute path to a static asset, return an absolute
path. An absolute path will be returned unchanged while a relative path
will be passed to django.templatetags.static.static().
"""
if path.startswith(('http://', 'https://', '/')):
return path
return static(path)
def __getitem__(self, name):
"""Return a Media object that only contains media of the given type."""
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
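# Builds the "media" property used by MediaDefiningClass below: it merges the media of the
# parent class with the class's own inner Media definition, honouring its 'extend' attribute.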
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = super(MediaDefiningClass, mcs).__new__(mcs, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
class Widget(metaclass=MediaDefiningClass):
    needs_multipart_form = False  # Determines whether this widget needs a multipart form
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None):
context = self.get_context(name, value, attrs)
yield context['widget']
def format_value(self, value):
"""
Return a value as it should appear when rendered in a template.
"""
if value == '' or value is None:
return None
if self.is_localized:
return formats.localize_input(value)
return str(value)
def get_context(self, name, value, attrs):
context = {}
context['widget'] = {
'name': name,
'is_hidden': self.is_hidden,
'required': self.is_required,
'value': self.format_value(value),
'attrs': self.build_attrs(self.attrs, attrs),
'template_name': self.template_name,
}
return context
def render(self, name, value, attrs=None, renderer=None):
"""Render the widget as an HTML string."""
context = self.get_context(name, value, attrs)
return self._render(self.template_name, context, renderer)
def _render(self, template_name, context, renderer=None):
if renderer is None:
renderer = get_default_renderer()
return mark_safe(renderer.render(template_name, context))
def build_attrs(self, base_attrs, extra_attrs=None):
"""Build an attribute dictionary."""
attrs = base_attrs.copy()
if extra_attrs is not None:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, return the value
of this widget or None if it's not provided.
"""
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in data
def id_for_label(self, id_):
"""
Return the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Return None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
def use_required_attribute(self, initial):
return not self.is_hidden
class Input(Widget):
"""
Base class for all <input> widgets.
"""
input_type = None # Subclasses must define this.
template_name = 'django/forms/widgets/input.html'
def __init__(self, attrs=None):
if attrs is not None:
attrs = attrs.copy()
self.input_type = attrs.pop('type', self.input_type)
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['type'] = self.input_type
return context
class TextInput(Input):
input_type = 'text'
template_name = 'django/forms/widgets/text.html'
class NumberInput(Input):
input_type = 'number'
template_name = 'django/forms/widgets/number.html'
class EmailInput(Input):
input_type = 'email'
template_name = 'django/forms/widgets/email.html'
class URLInput(Input):
input_type = 'url'
template_name = 'django/forms/widgets/url.html'
class PasswordInput(Input):
input_type = 'password'
template_name = 'django/forms/widgets/password.html'
def __init__(self, attrs=None, render_value=False):
super().__init__(attrs)
self.render_value = render_value
def get_context(self, name, value, attrs):
if not self.render_value:
value = None
return super().get_context(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
template_name = 'django/forms/widgets/hidden.html'
class MultipleHiddenInput(HiddenInput):
"""
Handle <input type="hidden"> for fields that have a list
of values.
"""
template_name = 'django/forms/widgets/multiple_hidden.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
final_attrs = context['widget']['attrs']
id_ = context['widget']['attrs'].get('id')
subwidgets = []
for index, value_ in enumerate(context['widget']['value']):
widget_attrs = final_attrs.copy()
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
widget_attrs['id'] = '%s_%s' % (id_, index)
widget = HiddenInput()
widget.is_required = self.is_required
subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def format_value(self, value):
return [] if value is None else value
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
template_name = 'django/forms/widgets/file.html'
def format_value(self, value):
"""File input never renders a value."""
return
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in files
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
clear_checkbox_label = _('Clear')
initial_text = _('Currently')
input_text = _('Change')
template_name = 'django/forms/widgets/clearable_file_input.html'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
Return whether value is considered to be initial value.
"""
return bool(value and getattr(value, 'url', False))
def format_value(self, value):
"""
Return the file object if it has a defined url attribute.
"""
if self.is_initial(value):
return value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
context.update({
'checkbox_name': checkbox_name,
'checkbox_id': checkbox_id,
'is_initial': self.is_initial(value),
'input_text': self.input_text,
'initial_text': self.initial_text,
'clear_checkbox_label': self.clear_checkbox_label,
})
return context
def value_from_datadict(self, data, files, name):
upload = super().value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
def use_required_attribute(self, initial):
return super().use_required_attribute(initial) and not initial
def value_omitted_from_data(self, data, files, name):
return (
super().value_omitted_from_data(data, files, name) and
self.clear_checkbox_name(name) not in data
)
class Textarea(Widget):
template_name = 'django/forms/widgets/textarea.html'
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super().__init__(default_attrs)
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super().__init__(attrs)
self.format = format if format else None
def format_value(self, value):
return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
template_name = 'django/forms/widgets/date.html'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/datetime.html'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/time.html'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Input):
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox.html'
def __init__(self, attrs=None, check_test=None):
super().__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def format_value(self, value):
"""Only return the 'value' attribute if value isn't empty."""
if value is True or value is False or value is None or value == '':
return
return str(value)
def get_context(self, name, value, attrs):
if self.check_test(value):
if attrs is None:
attrs = {}
attrs['checked'] = True
return super().get_context(name, value, attrs)
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, str):
value = values.get(value.lower(), value)
return bool(value)
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
class ChoiceWidget(Widget):
allow_multiple_selected = False
input_type = None
template_name = None
option_template_name = None
add_id_index = True
checked_attribute = {'checked': True}
option_inherits_attrs = True
def __init__(self, attrs=None, choices=()):
super().__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None):
"""
Yield all "subwidgets" of this widget. Used to enable iterating
options from a BoundField for choice widgets.
"""
value = self.format_value(value)
yield from self.options(name, value, attrs)
def options(self, name, value, attrs=None):
"""Yield a flat list of options for this widgets."""
for group in self.optgroups(name, value, attrs):
yield from group[1]
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
default = (None, [], 0)
groups = [default]
has_selected = False
for option_value, option_label in chain(self.choices):
if option_value is None:
option_value = ''
if isinstance(option_label, (list, tuple)):
index = groups[-1][2] + 1
subindex = 0
subgroup = []
groups.append((option_value, subgroup, index))
choices = option_label
else:
index = len(default[1])
subgroup = default[1]
subindex = None
choices = [(option_value, option_label)]
for subvalue, sublabel in choices:
selected = (
str(subvalue) in value and
(not has_selected or self.allow_multiple_selected)
)
if selected and not has_selected:
has_selected = True
subgroup.append(self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
if subindex is not None:
subindex += 1
return groups
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
index = str(index) if subindex is None else "%s_%s" % (index, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
option_attrs['id'] = self.id_for_label(option_attrs['id'], index)
return {
'name': name,
'value': str(value),
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
}
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs)
context['wrap_label'] = True
return context
def id_for_label(self, id_, index='0'):
"""
Use an incremented id for each option where the main widget
references the zero index.
"""
if id_ and self.add_id_index:
id_ = '%s_%s' % (id_, index)
return id_
def value_from_datadict(self, data, files, name):
getter = data.get
if self.allow_multiple_selected:
try:
getter = data.getlist
except AttributeError:
pass
return getter(name)
def format_value(self, value):
"""Return selected values as a list."""
if not isinstance(value, (tuple, list)):
value = [value]
return [str(v) if v is not None else '' for v in value]
class Select(ChoiceWidget):
input_type = 'select'
template_name = 'django/forms/widgets/select.html'
option_template_name = 'django/forms/widgets/select_option.html'
add_id_index = False
checked_attribute = {'selected': True}
option_inherits_attrs = False
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.allow_multiple_selected:
context['widget']['attrs']['multiple'] = 'multiple'
return context
@staticmethod
def _choice_has_empty_value(choice):
"""Return True if the choice's value is empty string or None."""
value, _ = choice
return (isinstance(value, str) and not bool(value)) or value is None
def use_required_attribute(self, initial):
"""
Don't render 'required' if the first <option> has a value, as that's
invalid HTML.
"""
use_required_attribute = super().use_required_attribute(initial)
# 'required' is always okay for <select multiple>.
if self.allow_multiple_selected:
return use_required_attribute
first_choice = next(iter(self.choices), None)
return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice)
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (
('1', _('Unknown')),
('2', _('Yes')),
('3', _('No')),
)
super().__init__(attrs, choices)
def format_value(self, value):
try:
return {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
return '1'
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {
'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False,
}.get(value)
class SelectMultiple(Select):
allow_multiple_selected = True
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def value_omitted_from_data(self, data, files, name):
# An unselected <select multiple> doesn't appear in POST data, so it's
# never known if the value is actually omitted.
return False
class RadioSelect(ChoiceWidget):
input_type = 'radio'
template_name = 'django/forms/widgets/radio.html'
option_template_name = 'django/forms/widgets/radio_option.html'
class CheckboxSelectMultiple(ChoiceWidget):
allow_multiple_selected = True
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox_select.html'
option_template_name = 'django/forms/widgets/checkbox_option.html'
def use_required_attribute(self, initial):
# Don't use the 'required' attribute because browser validation would
# require all checkboxes to be checked instead of at least one.
return False
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
def id_for_label(self, id_, index=None):
""""
Don't include for="field_0" in <label> because clicking such a label
would toggle the first checkbox.
"""
if index is None:
return ''
return super().id_for_label(id_, index)
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
In addition to the values added by Widget.get_context(), this widget
adds a list of subwidgets to the context as widget['subwidgets'].
These can be looped over and rendered like normal widgets.
You'll probably want to use this class with MultiValueField.
"""
template_name = 'django/forms/widgets/multiwidget.html'
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super().__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = context['widget']['attrs']
input_type = final_attrs.pop('type', None)
id_ = final_attrs.get('id')
subwidgets = []
for i, widget in enumerate(self.widgets):
if input_type is not None:
widget.input_type = input_type
widget_name = '%s_%s' % (name, i)
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
widget_attrs = final_attrs.copy()
widget_attrs['id'] = '%s_%s' % (id_, i)
else:
widget_attrs = final_attrs
subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def id_for_label(self, id_):
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def value_omitted_from_data(self, data, files, name):
return all(
widget.value_omitted_from_data(data, files, name + '_%s' % i)
for i, widget in enumerate(self.widgets)
)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"""
Media for a multiwidget is the combination of all media of the
subwidgets.
"""
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super().__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
"""
A widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
template_name = 'django/forms/widgets/splitdatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
widgets = (
DateInput(
attrs=attrs if date_attrs is None else date_attrs,
format=date_format,
),
TimeInput(
attrs=attrs if time_attrs is None else time_attrs,
format=time_format,
),
)
super().__init__(widgets)
def decompress(self, value):
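        # Split the combined datetime into [date, time] for the two subwidgets, converting to
        # the current time zone and dropping microseconds.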
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A widget that splits datetime input into two <input type="hidden"> inputs.
"""
template_name = 'django/forms/widgets/splithiddendatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
super().__init__(attrs, date_format, time_format, date_attrs, time_attrs)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
template_name = 'django/forms/widgets/select_date.html'
input_type = 'select'
select_widget = Select
date_re = re.compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
if not len(empty_label) == 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = (0, empty_label[0])
self.month_none_value = (0, empty_label[1])
self.day_none_value = (0, empty_label[2])
else:
if empty_label is not None:
self.none_value = (0, empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
date_context = {}
year_choices = [(i, i) for i in self.years]
if not self.is_required:
year_choices.insert(0, self.year_none_value)
year_attrs = context['widget']['attrs'].copy()
year_name = self.year_field % name
year_attrs['id'] = 'id_%s' % year_name
date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context(
name=year_name,
value=context['widget']['value']['year'],
attrs=year_attrs,
)
month_choices = list(self.months.items())
if not self.is_required:
month_choices.insert(0, self.month_none_value)
month_attrs = context['widget']['attrs'].copy()
month_name = self.month_field % name
month_attrs['id'] = 'id_%s' % month_name
date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context(
name=month_name,
value=context['widget']['value']['month'],
attrs=month_attrs,
)
day_choices = [(i, i) for i in range(1, 32)]
if not self.is_required:
day_choices.insert(0, self.day_none_value)
day_attrs = context['widget']['attrs'].copy()
day_name = self.day_field % name
day_attrs['id'] = 'id_%s' % day_name
date_context['day'] = self.select_widget(attrs, choices=day_choices,).get_context(
name=day_name,
value=context['widget']['value']['day'],
attrs=day_attrs,
)
subwidgets = []
for field in self._parse_date_fmt():
subwidgets.append(date_context[field]['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def format_value(self, value):
"""
Return a dict containing the year, month, and day of the current value.
Use dict instead of a datetime to allow invalid dates such as February
31 to display correctly.
"""
year, month, day = None, None, None
if isinstance(value, (datetime.date, datetime.datetime)):
year, month, day = value.year, value.month, value.day
elif isinstance(value, str):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
d = datetime.datetime.strptime(value, input_format)
year, month, day = d.year, d.month, d.day
except ValueError:
pass
match = self.date_re.match(value)
if match:
year, month, day = [int(val) for val in match.groups()]
return {'year': year, 'month': month, 'day': day}
@staticmethod
def _parse_date_fmt():
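        # Yield 'year', 'month' and 'day' in the order their format characters appear in
        # DATE_FORMAT, so the three selects follow the locale's ordering.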
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
else:
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return not any(
('{}_{}'.format(name, interval) in data)
for interval in ('year', 'month', 'day')
)
| bsd-3-clause | -4,917,147,239,116,290,000 | 33.028874 | 115 | 0.581203 | false |
dcramer/django-compositepks | django/db/models/fields/related.py | 13 | 42537 | from django.db import connection, transaction
from django.db.models import signals, get_model
from django.db.models.fields import AutoField, Field, IntegerField, PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy, string_concat, ungettext, ugettext as _
from django.utils.functional import curry
from django.core import exceptions
from django import forms
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = get_model(app_label, model_name, False)
if model:
operation(field, model, cls)
else:
key = (app_label, model_name)
value = (cls, field, operation)
pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(object):
def contribute_to_class(self, cls, name):
sup = super(RelatedField, self)
# Add an accessor to allow easy determination of the related query path for this field
self.related_query_name = curry(self._get_related_query_name, cls._meta)
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name)
if not cls._meta.abstract and self.rel.related_name:
self.rel.related_name = self.rel.related_name % {'class': cls.__name__.lower()}
other = self.rel.to
if isinstance(other, basestring):
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, related)
def get_db_prep_lookup(self, lookup_type, value):
# If we are doing a lookup on a Related Field, we must be
# comparing object instances. The value should be the PK of value,
# not value itself.
def pk_trace(value):
# Value may be a primary key, or an object held in a relation.
# If it is an object, then we need to get the primary key value for
# that object. In certain conditions (especially one-to-one relations),
# the primary key may itself be an object - so we need to keep drilling
# down until we hit a value that can be used for a comparison.
v, field = value, None
try:
while True:
v, field = getattr(v, v._meta.pk.name), v._meta.pk
except AttributeError:
pass
if field:
if lookup_type in ('range', 'in'):
v = [v]
v = field.get_db_prep_lookup(lookup_type, v)
if isinstance(v, list):
v = v[0]
return v
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
return QueryWrapper(('(%s)' % sql), params)
        # FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt']:
return [pk_trace(value)]
if lookup_type in ('range', 'in'):
return [pk_trace(v) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError, "Related Field has invalid lookup: %s" % lookup_type
def _get_related_query_name(self, opts):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_name or opts.object_name.lower()
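# Example of the related query name in practice (field names are illustrative):
# with "class Choice(models.Model): poll = models.ForeignKey(Poll)", the
# reverse path defaults to the lower-cased model name, so
#     Poll.objects.filter(choice__votes__gte=10)
# spans the relation; setting related_name on the field replaces "choice" here.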
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = '_%s_cache' % related.get_accessor_name()
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
try:
return getattr(instance, self.cache_name)
except AttributeError:
params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
rel_obj = self.related.model._default_manager.get(**params)
setattr(instance, self.cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.related.get_accessor_name()))
elif value is not None and not isinstance(value, self.related.model):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.related.get_accessor_name(), self.related.opts.object_name))
# Set the value of the related field
setattr(value, self.related.field.rel.get_related_field().attname, instance)
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
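# Illustrative behaviour of the descriptor above: accessing "place.restaurant"
# issues one query and caches the result on the instance, while assigning
# "place.restaurant = r" also seeds both caches so no further query is needed.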
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.field.name
cache_name = self.field.get_cache_name()
try:
return getattr(instance, cache_name)
except AttributeError:
val = getattr(instance, self.field.attname)
if val is None:
# If NULL is an allowed value, return it.
if self.field.null:
return None
raise self.field.rel.to.DoesNotExist
other_field = self.field.rel.get_related_field()
if other_field.rel:
params = {'%s__pk' % self.field.rel.field_name: val}
else:
params = {'%s__exact' % self.field.rel.field_name: val}
# If the related manager indicates that it should be used for
# related fields, respect that.
rel_mgr = self.field.rel.to._default_manager
if getattr(rel_mgr, 'use_for_related_fields', False):
rel_obj = rel_mgr.get(**params)
else:
rel_obj = QuerySet(self.field.rel.to).get(**params)
setattr(instance, cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
            raise AttributeError, "%s must be accessed via instance" % self.field.name
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name))
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.field.name, self.field.rel.to._meta.object_name))
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
except AttributeError:
val = None
setattr(instance, self.field.attname, val)
# Since we already know what the related object is, seed the related
# object cache now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.field.get_cache_name(), value)
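# Illustrative behaviour: "choice.poll" resolves the stored poll_id with a
# single query and caches the result on the instance; "choice.poll = poll"
# copies poll's primary key into choice.poll_id and primes the cache.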
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
rel_field = self.related.field
rel_model = self.related.model
# Dynamically create a class that subclasses the related
# model's default manager.
superclass = self.related.model._default_manager.__class__
class RelatedManager(superclass):
def get_query_set(self):
return superclass.get_query_set(self).filter(**(self.core_filters))
def add(self, *objs):
for obj in objs:
setattr(obj, rel_field.name, instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update({rel_field.name: instance})
return super(RelatedManager, self).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
# Update kwargs with the related object that this
# ForeignRelatedObjectsDescriptor knows about.
kwargs.update({rel_field.name: instance})
return super(RelatedManager, self).get_or_create(**kwargs)
get_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs):
val = getattr(instance, rel_field.rel.get_related_field().attname)
for obj in objs:
# Is obj actually part of this descriptor set?
if getattr(obj, rel_field.attname) == val:
setattr(obj, rel_field.name, None)
obj.save()
else:
raise rel_field.rel.to.DoesNotExist, "%r is not related to %r." % (obj, instance)
remove.alters_data = True
def clear(self):
for obj in self.all():
setattr(obj, rel_field.name, None)
obj.save()
clear.alters_data = True
manager = RelatedManager()
attname = rel_field.rel.get_related_field().name
manager.core_filters = {'%s__%s' % (rel_field.name, attname):
getattr(instance, attname)}
manager.model = self.related.model
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
manager = self.__get__(instance)
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
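# Typical calls against the manager built above (names follow the poll/choice
# example in the comments; field names are illustrative):
#     poll.choice_set.create(choice='Maybe')   # saved with its FK set to poll
#     poll.choice_set.add(existing_choice)     # re-points the FK and saves
#     poll.choice_set = [c1, c2]               # clear() first when the FK is nullable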
def create_many_related_manager(superclass, through=False):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
class ManyRelatedManager(superclass):
def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
join_table=None, source_col_name=None, target_col_name=None):
super(ManyRelatedManager, self).__init__()
self.core_filters = core_filters
self.model = model
self.symmetrical = symmetrical
self.instance = instance
self.join_table = join_table
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.through = through
self._pk_val = self.instance._get_pk_val()
if self._pk_val is None:
raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
def get_query_set(self):
return superclass.get_query_set(self)._next_is_sticky().filter(**(self.core_filters))
# If the ManyToMany relation has an intermediary model,
# the add and remove methods do not exist.
if through is None:
def add(self, *objs):
self._add_items(self.source_col_name, self.target_col_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_col_name, self.source_col_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_items(self.source_col_name, self.target_col_name, *objs)
# If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
if self.symmetrical:
self._remove_items(self.target_col_name, self.source_col_name, *objs)
remove.alters_data = True
def clear(self):
self._clear_items(self.source_col_name)
# If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
if self.symmetrical:
self._clear_items(self.target_col_name)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if through is not None:
raise AttributeError, "Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
new_obj = super(ManyRelatedManager, self).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
obj, created = \
super(ManyRelatedManager, self).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def _add_items(self, source_col_name, target_col_name, *objs):
# join_table: name of the m2m link table
# source_col_name: the PK colname in join_table for the source object
# target_col_name: the PK colname in join_table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
new_ids.add(obj._get_pk_val())
else:
new_ids.add(obj)
# Add the newly created or already existing objects to the join table.
# First find out which items are already added, to avoid adding them twice
cursor = connection.cursor()
cursor.execute("SELECT %s FROM %s WHERE %s = %%s AND %s IN (%s)" % \
(target_col_name, self.join_table, source_col_name,
target_col_name, ",".join(['%s'] * len(new_ids))),
[self._pk_val] + list(new_ids))
existing_ids = set([row[0] for row in cursor.fetchall()])
# Add the ones that aren't there already
for obj_id in (new_ids - existing_ids):
cursor.execute("INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % \
(self.join_table, source_col_name, target_col_name),
[self._pk_val, obj_id])
transaction.commit_unless_managed()
def _remove_items(self, source_col_name, target_col_name, *objs):
# source_col_name: the PK colname in join_table for the source object
# target_col_name: the PK colname in join_table for the target object
# *objs - objects to remove
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
old_ids.add(obj._get_pk_val())
else:
old_ids.add(obj)
# Remove the specified objects from the join table
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE %s = %%s AND %s IN (%s)" % \
(self.join_table, source_col_name,
target_col_name, ",".join(['%s'] * len(old_ids))),
[self._pk_val] + list(old_ids))
transaction.commit_unless_managed()
def _clear_items(self, source_col_name):
# source_col_name: the PK colname in join_table for the source object
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE %s = %%s" % \
(self.join_table, source_col_name),
[self._pk_val])
transaction.commit_unless_managed()
return ManyRelatedManager
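# Sketch of what the generated manager does for a plain (non-"through") m2m
# relation: add(obj) inserts any missing (source_id, target_id) rows straight
# into the join table, remove(obj) deletes them, and a symmetrical relation to
# self mirrors each operation with the columns swapped.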
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model = self.related.model
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.related.field.rel.through)
qn = connection.ops.quote_name
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
instance=instance,
symmetrical=False,
join_table=qn(self.related.field.m2m_db_table()),
source_col_name=qn(self.related.field.m2m_reverse_name()),
target_col_name=qn(self.related.field.m2m_column_name())
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
through = getattr(self.related.field.rel, 'through', None)
if through is not None:
raise AttributeError, "Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
def __init__(self, m2m_field):
self.field = m2m_field
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model=self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.field.rel.through)
qn = connection.ops.quote_name
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
instance=instance,
symmetrical=(self.field.rel.symmetrical and instance.__class__ == rel_model),
join_table=qn(self.field.m2m_db_table()),
source_col_name=qn(self.field.m2m_column_name()),
target_col_name=qn(self.field.m2m_reverse_name())
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
through = getattr(self.field.rel, 'through', None)
if through is not None:
raise AttributeError, "Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
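# Illustrative usage: "article.publications.all()" goes through the manager
# created above, and "article.publications = [p1, p2]" clears this article's
# join-table rows and re-adds the given objects; assignment raises
# AttributeError when an intermediary ("through") model is in use.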
class ManyToOneRel(object):
def __init__(self, to, field_name, related_name=None,
limit_choices_to=None, lookup_overrides=None, parent_link=False):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.to, self.field_name = to, field_name
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.lookup_overrides = lookup_overrides or {}
self.multiple = True
self.parent_link = parent_link
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
class OneToOneRel(ManyToOneRel):
def __init__(self, to, field_name, related_name=None,
limit_choices_to=None, lookup_overrides=None, parent_link=False):
super(OneToOneRel, self).__init__(to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
lookup_overrides=lookup_overrides, parent_link=parent_link)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None):
self.to = to
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.symmetrical = symmetrical
self.multiple = True
self.through = through
class ForeignKey(RelatedField, Field):
empty_strings_allowed = False
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
to_field = to_field or to._meta.pk.name
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = rel_class(to, to_field,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
lookup_overrides=kwargs.pop('lookup_overrides', None),
parent_link=kwargs.pop('parent_link', False))
Field.__init__(self, **kwargs)
self.db_index = True
def get_attname(self):
return '%s_id' % self.name
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.rel.get_related_field().attname)
return field_default
def get_db_prep_save(self, value):
if value == '' or value == None:
return None
else:
return self.rel.get_related_field().get_db_prep_save(value)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_unicode(choice_list[1][0])
return Field.value_to_string(self, obj)
def contribute_to_class(self, cls, name):
super(ForeignKey, self).contribute_to_class(cls, name)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "o2m")
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
def formfield(self, **kwargs):
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.complex_filter(
self.rel.limit_choices_to),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
# If the database needs similar types for key fields however, the only
# thing we can do is making AutoField an IntegerField.
rel_field = self.rel.get_related_field()
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type()
return rel_field.db_type()
class OneToOneField(ForeignKey):
"""
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(),
SingleRelatedObjectDescriptor(related))
if not cls._meta.one_to_one_field:
cls._meta.one_to_one_field = self
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
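# Example (illustrative): multi-table inheritance uses a parent link of this
# kind, roughly "place_ptr = models.OneToOneField(Place, parent_link=True)",
# for which formfield() above deliberately returns None.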
class ManyToManyField(RelatedField, Field):
def __init__(self, to, **kwargs):
try:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(to,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', True),
through=kwargs.pop('through', None))
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
self.creates_table = False
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
else:
self.creates_table = True
Field.__init__(self, **kwargs)
msg = ugettext_lazy('Hold down "Control", or "Command" on a Mac, to select more than one.')
self.help_text = string_concat(self.help_text, ' ', msg)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through_model._meta.db_table
elif self.db_table:
return self.db_table
else:
return '%s_%s' % (opts.db_table, self.name)
def _get_m2m_column_name(self, related):
"Function that can be curried to provide the source column name for the m2m table"
try:
return self._m2m_column_name_cache
        except AttributeError:
if self.rel.through is not None:
for f in self.rel.through_model._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.model:
self._m2m_column_name_cache = f.column
break
# If this is an m2m relation to self, avoid the inevitable name clash
elif related.model == related.parent_model:
self._m2m_column_name_cache = 'from_' + related.model._meta.object_name.lower() + '_id'
else:
self._m2m_column_name_cache = related.model._meta.object_name.lower() + '_id'
# Return the newly cached value
return self._m2m_column_name_cache
def _get_m2m_reverse_name(self, related):
"Function that can be curried to provide the related column name for the m2m table"
try:
return self._m2m_reverse_name_cache
        except AttributeError:
if self.rel.through is not None:
found = False
for f in self.rel.through_model._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.parent_model:
if related.model == related.parent_model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
self._m2m_reverse_name_cache = f.column
break
else:
found = True
else:
self._m2m_reverse_name_cache = f.column
break
# If this is an m2m relation to self, avoid the inevitable name clash
elif related.model == related.parent_model:
self._m2m_reverse_name_cache = 'to_' + related.parent_model._meta.object_name.lower() + '_id'
else:
self._m2m_reverse_name_cache = related.parent_model._meta.object_name.lower() + '_id'
# Return the newly cached value
return self._m2m_reverse_name_cache
def isValidIDList(self, field_data, all_data):
"Validates that the value is a valid list of foreign keys"
mod = self.rel.to
try:
pks = map(int, field_data.split(','))
except ValueError:
# the CommaSeparatedIntegerField validator will catch this error
return
objects = mod._default_manager.in_bulk(pks)
if len(objects) != len(pks):
badkeys = [k for k in pks if k not in objects]
raise exceptions.ValidationError(
ungettext("Please enter valid %(self)s IDs. The value %(value)r is invalid.",
"Please enter valid %(self)s IDs. The values %(value)r are invalid.",
len(badkeys)) % {
'self': self.verbose_name,
'value': len(badkeys) == 1 and badkeys[0] or tuple(badkeys),
})
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_unicode(data)
def contribute_to_class(self, cls, name):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and self.rel.to == "self" and self.rel.related_name is None:
self.rel.related_name = "%s_rel_+" % name
super(ManyToManyField, self).contribute_to_class(cls, name)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, basestring):
def resolve_through_model(field, model, cls):
field.rel.through_model = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
elif self.rel.through:
self.rel.through_model = self.rel.through
self.rel.through = self.rel.through._meta.object_name
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "m2m")
def contribute_to_related_class(self, cls, related):
# m2m relations to self do not have a ManyRelatedObjectsDescriptor,
# as it would be redundant - unless the field is non-symmetrical.
if related.model != related.parent_model or not self.rel.symmetrical:
# Add the descriptor for the m2m relation
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_column_name, related)
self.m2m_reverse_name = curry(self._get_m2m_reverse_name, related)
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ModelMultipleChoiceField, 'queryset': self.rel.to._default_manager.complex_filter(self.rel.limit_choices_to)}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
defaults['initial'] = [i._get_pk_val() for i in defaults['initial']]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
| bsd-3-clause | -9,099,354,057,314,996,000 | 44.936285 | 222 | 0.604368 | false |
Agana/MyBlogAgain | django/utils/synch.py | 376 | 2549 | """
Synchronization primitives:
- reader-writer lock (preference to writers)
(Contributed to Django by [email protected])
"""
try:
import threading
except ImportError:
import dummy_threading as threading
class RWLock:
"""
Classic implementation of reader-writer lock with preference to writers.
Readers can access a resource simultaneously.
Writers get an exclusive access.
API is self-descriptive:
reader_enters()
reader_leaves()
writer_enters()
writer_leaves()
"""
def __init__(self):
self.mutex = threading.RLock()
self.can_read = threading.Semaphore(0)
self.can_write = threading.Semaphore(0)
self.active_readers = 0
self.active_writers = 0
self.waiting_readers = 0
self.waiting_writers = 0
def reader_enters(self):
self.mutex.acquire()
try:
if self.active_writers == 0 and self.waiting_writers == 0:
self.active_readers += 1
self.can_read.release()
else:
self.waiting_readers += 1
finally:
self.mutex.release()
self.can_read.acquire()
def reader_leaves(self):
self.mutex.acquire()
try:
self.active_readers -= 1
if self.active_readers == 0 and self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
finally:
self.mutex.release()
def writer_enters(self):
self.mutex.acquire()
try:
if self.active_writers == 0 and self.waiting_writers == 0 and self.active_readers == 0:
self.active_writers += 1
self.can_write.release()
else:
self.waiting_writers += 1
finally:
self.mutex.release()
self.can_write.acquire()
def writer_leaves(self):
self.mutex.acquire()
try:
self.active_writers -= 1
if self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
elif self.waiting_readers != 0:
t = self.waiting_readers
self.waiting_readers = 0
self.active_readers += t
while t > 0:
self.can_read.release()
t -= 1
finally:
self.mutex.release()
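# Minimal usage sketch (caller code, shown for illustration only):
#
#     lock = RWLock()
#     lock.reader_enters()
#     try:
#         pass  # read the shared resource
#     finally:
#         lock.reader_leaves()
#
# Writers wrap their critical section in writer_enters()/writer_leaves()
# with the same try/finally pattern.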
| bsd-3-clause | -5,419,001,903,776,602,000 | 28.298851 | 99 | 0.534327 | false |
fernandezcuesta/ansible | contrib/inventory/openstack.py | 64 | 8980 | #!/usr/bin/env python
# Copyright (c) 2012, Marco Vito Moscaritolo <[email protected]>
# Copyright (c) 2013, Jesse Keating <[email protected]>
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
# Copyright (c) 2016, Rackspace Australia
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# The OpenStack Inventory module uses os-client-config for configuration.
# https://github.com/openstack/os-client-config
# This means it will either:
# - Respect normal OS_* environment variables like other OpenStack tools
# - Read values from a clouds.yaml file.
# If you want to configure via clouds.yaml, you can put the file in:
# - Current directory
# - ~/.config/openstack/clouds.yaml
# - /etc/openstack/clouds.yaml
# - /etc/ansible/openstack.yml
# The clouds.yaml file can contain entries for multiple clouds and multiple
# regions of those clouds. If it does, this inventory module will connect to
# all of them and present them as one contiguous inventory.
#
# See the adjacent openstack.yml file for an example config file
# There are three ansible inventory specific options that can be set in
# the inventory section.
# expand_hostvars controls whether or not the inventory will make extra API
# calls to fill out additional information about each server
# use_hostnames changes the behavior from registering every host with its UUID
# and making a group of its hostname to only doing this if the
# hostname in question has more than one server
# fail_on_errors causes the inventory to fail and return no hosts if one cloud
# has failed (for example, bad credentials or being offline).
# When set to False, the inventory will return hosts from
# whichever other clouds it can contact. (Default: True)
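# A minimal clouds.yaml with the inventory section might look like this
# (all values below are placeholders):
#
# clouds:
#   mycloud:
#     auth:
#       auth_url: https://keystone.example.com:5000/v3
#       username: demo
#       password: secret
#       project_name: demo
#     region_name: RegionOne
# ansible:
#   use_hostnames: True
#   expand_hostvars: False
#   fail_on_errors: True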
import argparse
import collections
import os
import sys
import time
from distutils.version import StrictVersion
try:
import json
except ImportError:
import simplejson as json
import os_client_config
import shade
import shade.inventory
CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
def get_groups_from_server(server_vars, namegroup=True):
groups = []
region = server_vars['region']
cloud = server_vars['cloud']
metadata = server_vars.get('metadata', {})
# Create a group for the cloud
groups.append(cloud)
# Create a group on region
groups.append(region)
# And one by cloud_region
groups.append("%s_%s" % (cloud, region))
# Check if group metadata key in servers' metadata
if 'group' in metadata:
groups.append(metadata['group'])
for extra_group in metadata.get('groups', '').split(','):
if extra_group:
groups.append(extra_group.strip())
groups.append('instance-%s' % server_vars['id'])
if namegroup:
groups.append(server_vars['name'])
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
groups.append('%s-%s' % (key, server_vars[key]['name']))
for key, value in iter(metadata.items()):
groups.append('meta-%s_%s' % (key, value))
az = server_vars.get('az', None)
if az:
# Make groups for az, region_az and cloud_region_az
groups.append(az)
groups.append('%s_%s' % (region, az))
groups.append('%s_%s_%s' % (cloud, region, az))
return groups
def get_host_groups(inventory, refresh=False):
(cache_file, cache_expiration_time) = get_cache_settings()
if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
groups = to_json(get_host_groups_from_cloud(inventory))
open(cache_file, 'w').write(groups)
else:
groups = open(cache_file, 'r').read()
return groups
def append_hostvars(hostvars, groups, key, server, namegroup=False):
hostvars[key] = dict(
ansible_ssh_host=server['interface_ip'],
ansible_host=server['interface_ip'],
openstack=server)
for group in get_groups_from_server(server, namegroup=namegroup):
groups[group].append(key)
def get_host_groups_from_cloud(inventory):
groups = collections.defaultdict(list)
firstpass = collections.defaultdict(list)
hostvars = {}
list_args = {}
if hasattr(inventory, 'extra_config'):
use_hostnames = inventory.extra_config['use_hostnames']
list_args['expand'] = inventory.extra_config['expand_hostvars']
if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
list_args['fail_on_cloud_config'] = \
inventory.extra_config['fail_on_errors']
else:
use_hostnames = False
for server in inventory.list_hosts(**list_args):
if 'interface_ip' not in server:
continue
firstpass[server['name']].append(server)
for name, servers in firstpass.items():
if len(servers) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
server_ids = set()
# Trap for duplicate results
for server in servers:
server_ids.add(server['id'])
if len(server_ids) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
for server in servers:
append_hostvars(
hostvars, groups, server['id'], server,
namegroup=True)
groups['_meta'] = {'hostvars': hostvars}
return groups
def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
''' Determines if cache file has expired, or if it is still valid '''
if refresh:
return True
if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
mod_time = os.path.getmtime(cache_file)
current_time = time.time()
if (mod_time + cache_expiration_time) > current_time:
return False
return True
def get_cache_settings():
config = os_client_config.config.OpenStackConfig(
config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
# For inventory-wide caching
cache_expiration_time = config.get_cache_expiration_time()
cache_path = config.get_cache_path()
if not os.path.exists(cache_path):
os.makedirs(cache_path)
cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
return (cache_file, cache_expiration_time)
def to_json(in_dict):
return json.dumps(in_dict, sort_keys=True, indent=2)
def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--private',
action='store_true',
help='Use private address for ansible host')
parser.add_argument('--refresh', action='store_true',
help='Refresh cached information')
parser.add_argument('--debug', action='store_true', default=False,
help='Enable debug output')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
args = parse_args()
try:
config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
shade.simple_logging(debug=args.debug)
inventory_args = dict(
refresh=args.refresh,
config_files=config_files,
private=args.private,
)
if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict(
config_key='ansible',
config_defaults={
'use_hostnames': False,
'expand_hostvars': True,
'fail_on_errors': True,
}
))
inventory = shade.inventory.OpenStackInventory(**inventory_args)
if args.list:
output = get_host_groups(inventory, refresh=args.refresh)
elif args.host:
output = to_json(inventory.get_host(args.host))
print(output)
except shade.OpenStackCloudException as e:
sys.stderr.write('%s\n' % e.message)
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 | -208,964,337,351,742,720 | 35.356275 | 78 | 0.644098 | false |
mogoweb/chromium-crosswalk | chrome/test/mini_installer/file_verifier.py | 125 | 1116 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import verifier
class FileVerifier(verifier.Verifier):
"""Verifies that the current files match the expectation dictionaries."""
def _VerifyExpectation(self, expectation_name, expectation,
variable_expander):
"""Overridden from verifier.Verifier.
This method will throw an AssertionError if file state doesn't match the
|expectation|.
Args:
expectation_name: Path to the file being verified. It is expanded using
Expand.
expectation: A dictionary with the following key and value:
'exists' a boolean indicating whether the file should exist.
variable_expander: A VariableExpander object.
"""
file_path = variable_expander.Expand(expectation_name)
file_exists = os.path.exists(file_path)
assert expectation['exists'] == file_exists, \
('File %s exists' % file_path) if file_exists else \
('File %s is missing' % file_path)
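# For illustration only (the path and variable name below are made up), an
# expectation handled by this verifier has the shape
#     {'$SOME_DIR\\some_file.txt': {'exists': True}}
# where the key is run through VariableExpander.Expand() before the check.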
| bsd-3-clause | 5,927,250,617,274,627,000 | 35 | 77 | 0.694444 | false |
AmesianX/amoco | amoco/arch/arm/v8/asm64.py | 6 | 15185 | # -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2013 Axel Tillequin ([email protected])
# published under GPLv2 license
from amoco.logger import Log
logger = Log(__name__)
from .env64 import *
from .utils import *
from amoco.cas.utils import *
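# Convention for the helpers below: each i_<MNEMONIC>(i, fmap) implements the
# semantics of one AArch64 instruction, where 'i' is the decoded instruction
# (operands, sizes, flags) and 'fmap' is the mapper holding the machine state
# that the instruction updates (including pc).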
def i_ADC(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = map(fmap,i.operands[1:])
x,carry,overflow = AddWithCarry(op1, op2, fmap(C))
if i.setflags:
fmap[N] = x<0
fmap[Z] = x==0
fmap[C] = carry
fmap[V] = overflow
fmap[i.d] = x
def i_SBC(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = map(fmap,i.operands[1:])
x,carry,overflow = SubWithBorrow(op1, op2, fmap(C))
if i.setflags:
fmap[N] = x<0
fmap[Z] = x==0
fmap[C] = carry
fmap[V] = overflow
fmap[i.d] = x
def i_ADD(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = map(fmap,i.operands[1:])
x,carry,overflow = AddWithCarry(op1,op2)
if i.setflags:
fmap[N] = x<0
fmap[Z] = x==0
fmap[C] = carry
fmap[V] = overflow
fmap[i.d] = x
def i_SUB(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = map(fmap,i.operands[1:])
x,carry,overflow = SubWithBorrow(op1,op2)
if i.setflags:
fmap[N] = x<0
fmap[Z] = x==0
fmap[C] = carry
fmap[V] = overflow
fmap[i.d] = x
def i_ADR(i,fmap):
fmap[pc] = fmap[pc]+i.length
base = fmap(pc)
fmap[i.d] = base+i.imm
def i_ADRP(i,fmap):
fmap[pc] = fmap[pc]+i.length
base = fmap(pc)
base[0:12]=cst(0,12)
fmap[i.d] = base+i.imm
def i_AND(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 & src2)
fmap[dst] = x
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_ORR(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 | src2)
fmap[dst] = x
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_ORN(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 | ~src2)
fmap[dst] = x
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_EOR(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 ^ src2)
fmap[dst] = fmap(x)
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_EON(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 ^ ~src2)
fmap[dst] = x
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_ASRV(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = fmap(i.n>>i.m)
def i_Bcond(i,fmap):
cond = fmap(i.cond)
fmap[pc] = tst(cond, fmap[pc]+i.offset, fmap[pc]+i.length)
def i_B(i,fmap):
fmap[pc] = fmap[pc]+i.offset
def i_BL(i,fmap):
fmap[r30] = fmap[pc]+i.length
fmap[pc] = fmap[pc]+i.offset
def i_BR(i,fmap):
fmap[pc] = fmap(i.n)
def i_BLR(i,fmap):
fmap[r30] = fmap[pc]+i.length
fmap[pc] = fmap(i.n)
def i_BFM(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst = cst(0,i.datasize) if i.inzero else fmap(i.d)
src = fmap(i.n)
lo = (dst & ~i.wmask) | (ROR(src,i.immr.value) & i.wmask)
sta,sto = i.imms.value,i.imms.value+1
hi = composer([src[sta:sto]]*i.datasize) if i.extend else dst
fmap[i.d] = (hi & ~i.tmask) | (lo & i.tmask)
i_SBFM = i_BFM
i_UBFM = i_BFM
def i_BIC(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1 = fmap(i.n)
op2 = fmap(i.m)
if i.invert: op2 = ~op2
_r = op1 & op2
fmap[i.d] = _r
if i.setflags:
fmap[C] = bit0
fmap[V] = bit0
fmap[Z] = _r==0
fmap[N] = _r[_r.size-1:_r.size]
def i_BRK(i,fmap):
fmap[pc] = fmap[pc]+i.length
ext('BRK %s'%i.imm,size=pc.size).call(fmap)
def i_CBNZ(i,fmap):
fmap[pc] = tst(fmap(i.t!=0), fmap[pc]+i.offset, fmap[pc]+i.length)
def i_CBZ(i,fmap):
fmap[pc] = tst(fmap(i.t==0), fmap[pc]+i.offset, fmap[pc]+i.length)
def i_CCMN(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1, op2, nzcv, cond = i.operands
_r, carry, overflow = AddWithCarry(fmap(op1),fmap(op2))
fmap[N] = tst(fmap(cond), _r<0 , i.flags[0])
fmap[Z] = tst(fmap(cond), _r==0 , i.flags[1])
fmap[C] = tst(fmap(cond), carry , i.flags[2])
fmap[V] = tst(fmap(cond), overflow, i.flags[3])
def i_CCMP(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1, op2, nzcv, cond = i.operands
_r, carry, overflow = SubWithBorrow(fmap(op1),fmap(op2))
fmap[N] = tst(fmap(cond), _r<0 , i.flags[0])
fmap[Z] = tst(fmap(cond), _r==0 , i.flags[1])
fmap[C] = tst(fmap(cond), carry , i.flags[2])
fmap[V] = tst(fmap(cond), overflow, i.flags[3])
def i_CLREX(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_CLS(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.d.size)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_CLZ(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.d.size)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_CSEL(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, cond = i.operands
fmap[dst] = tst(fmap(cond), fmap(op1), fmap(op2))
def i_CSINC(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, cond = i.operands
fmap[dst] = tst(fmap(cond), fmap(op1), fmap(op2)+1)
def i_CSINV(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, cond = i.operands
fmap[dst] = tst(fmap(cond), fmap(op1), fmap(~op2))
def i_CSNEG(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, cond = i.operands
fmap[dst] = tst(fmap(cond), fmap(op1), fmap(-op2))
def i_DCPS1(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DCPS2(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DCPS3(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DMB(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DRPS(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DSB(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_ISB(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_ERET(i,fmap):
fmap[pc] = top(64)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_EXTR(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, lsb = i.operands
concat = composer(fmap(op2),fmap(op1))
result = concat[lsb:lsb+i.datasize]
fmap[dst] = result
def i_HINT(i,fmap):
fmap[pc] = fmap[pc]+i.length
if i.imm>0:
logger.warning('semantic undefined for %s(%d)'%(i.mnemonic,i.imm))
def i_HLT(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_HVC(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_LDAR(i,fmap):
fmap[pc] = fmap[pc]+i.length
data = fmap(mem(i.n,i.datasize))
if i.pair:
if not i.excl: raise InstructionError(i)
if i.elsize==32:
if internals['endianstate']==0:
fmap[i.t] = data[0:i.elsize]
fmap[i.t2] = data[i.elsize:i.datasize]
else:
fmap[i.t] = data[i.elsize:i.datasize]
fmap[i.t2] = data[0:i.elsize]
else:
fmap[i.t] = fmap(mem(i.n, 64))
fmap[i.t2] = fmap(mem(i.n, 64, disp=8))
else:
fmap[i.t] = data.zeroextend(i.regsize)
i_LDARB = i_LDAR
i_LDARH = i_LDAR
i_LDAXP = i_LDAR
i_LDAXR = i_LDAR
i_LDAXRB = i_LDAR
i_LDAXRH = i_LDAR
i_LDXP = i_LDAR
i_LDXR = i_LDAR
i_LDXRB = i_LDAR
i_LDXRH = i_LDAR
def i_STLR(i,fmap):
fmap[pc] = fmap[pc]+i.length
address = fmap(i.n)
if i.pair:
if not i.excl: raise InstructionError(i)
if internals['endianstate']==0:
data = composer(i.t,i.t2)
else:
data = composer(i.t2,i.t)
else:
data = i.t
if i.excl:
fmap[i.s] = cst(1,32)
fmap[address] = fmap(data)
i_STLRB = i_STLR
i_STLRH = i_STLR
i_STLXP = i_STLR
i_STLXR = i_STLR
i_STLXRB = i_STLR
i_STLXRH = i_STLR
i_STXP = i_STLR
i_STXR = i_STLR
i_STXRB = i_STLR
i_STXRH = i_STLR
def i_LDP(i,fmap):
fmap[pc] = fmap[pc]+i.length
address = i.n
if not i.postindex: address += i.offset
data1 = mem(address,i.datasize)
data2 = mem(address,i.datasize, disp=i.datasize/8)
fmap[i.t] = fmap(data1)
fmap[i.t2] = fmap(data2)
if i.wback:
if i.postindex: address += i.offset
fmap[i.n] = fmap(address)
def i_STP(i,fmap):
fmap[pc] = fmap[pc]+i.length
address = i.n
if not i.postindex: address += i.offset
data1 = fmap(i.t)
data2 = fmap(i.t2)
fmap[mem(address,i.datasize)] = data1
fmap[mem(address,i.datasize,disp=i.datasize/8)] = data2
if i.wback:
if i.postindex: address += i.offset
fmap[i.n] = fmap(address)
i_LDNP = i_LDP
i_STNP = i_STP
def i_LDPSW(i,fmap):
fmap[pc] = fmap[pc]+i.length
address = i.n
if not i.postindex: address += i.offset
data1 = mem(address,i.datasize)
data2 = mem(address,i.datasize, disp=i.datasize/8)
fmap[i.t] = fmap(data1).signextend(64)
fmap[i.t2] = fmap(data2).signextend(64)
if i.wback:
if i.postindex: address += i.offset
fmap[i.n] = fmap(address)
def i_LDR(i,fmap):
if len(i.operands)==3:
fmap[pc] = fmap[pc]+i.length
Xt, Xn, offset = i.operands
address = Xn
if not i.postindex: address += offset
data = mem(address,i.datasize)
if i.signed:
fmap[Xt] = data.signextend(i.regsize)
else:
fmap[Xt] = data.zeroextend(i.regsize)
if i.wback:
if i.postindex: address += offset
fmap[Xn] = fmap(address)
else:# literal case:
Xt, offset = i.operands
address = fmap[pc] + offset
fmap[pc] = fmap[pc]+i.length
data = mem(address,i.size)
if i.signed:
fmap[Xt] = fmap(data.signextend(64))
else:
fmap[Xt] = fmap(data.zeroextend(64))
i_LDRB = i_LDR
i_LDRH = i_LDR
i_LDRSB = i_LDR
i_LDRSH = i_LDR
i_LDRSW = i_LDR
i_LDTR = i_LDR
i_LDTRB = i_LDR
i_LDTRH = i_LDR
i_LDTRSB = i_LDR
i_LDTRSH = i_LDR
i_LDTRSW = i_LDR
i_LDUR = i_LDR
i_LDURB = i_LDR
i_LDURH = i_LDR
i_LDURSB = i_LDR
i_LDURSH = i_LDR
i_LDURSW = i_LDR
def i_LSLV(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2 = i.operands
op1.sf=False
fmap[dst] = fmap(op1<<op2)
def i_LSRV(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2 = i.operands
op1.sf=False
fmap[dst] = fmap(op1>>op2)
def i_MADD(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = fmap(i.a + i.r*i.m)
def i_MSUB(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = fmap(i.a - i.r*i.m)
def i_MOVK(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = fmap(i.d)
result[0:16] = i.imm
fmap[i.d] = result
def i_MOVZ(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = cst(0,i.d.size)
result[0:16] = i.imm
fmap[i.d] = result
def i_MOVN(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = cst(0,i.d.size)
result[0:16] = i.imm
fmap[i.d] = ~result
def i_MRS(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_MSR(i,fmap):
fmap[pc] = fmap[pc]+i.length
pstatefield, op2 = i.operands
fmap[pstatefield] = op2[0:pstatefield.size]
def i_PRFM(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_RBIT(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.datasize)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_REV16(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.datasize)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_REV32(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.datasize)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_REV(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.datasize)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_RET(i,fmap):
fmap[pc] = fmap(i.n)
def i_RORV(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = ROR(fmap(i.n),fmap(i.m))
def i_SDIV(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = fmap(i.n),fmap(i.m)
op1.sf = op2.sf = True
fmap[i.d] = op1/op2
def i_UDIV(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = fmap(i.n),fmap(i.m)
op1.sf = op2.sf = False
fmap[i.d] = op1/op2
def i_SMADDL(i,fmap):
fmap[pc] = fmap[pc]+i.length
_x = fmap(i.a + (i.n**i.m))
_x.sf = True
fmap[i.d] = _x
def i_SMSUBL(i,fmap):
fmap[pc] = fmap[pc]+i.length
_x = fmap(i.a - (i.n**i.m))
_x.sf = True
fmap[i.d] = _x
def i_UMADDL(i,fmap):
fmap[pc] = fmap[pc]+i.length
_x = fmap(i.a + (i.n**i.m))
_x.sf = False
fmap[i.d] = _x
def i_UMSUBL(i,fmap):
fmap[pc] = fmap[pc]+i.length
_x = fmap(i.a - (i.n**i.m))
_x.sf = False
fmap[i.d] = _x
def i_SMULH(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = fmap(i.n**i.m)
result.sf = True
fmap[i.d] = result[64:128]
def i_UMULH(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = fmap(i.n**i.m)
result.sf = False
fmap[i.d] = result[64:128]
def i_STR(i,fmap):
if len(i.operands)==3:
fmap[pc] = fmap[pc]+i.length
Xt, Xn, offset = i.operands
address = Xn
if not i.postindex: address += offset
dst = mem(address,i.datasize)
data = fmap(Xt)
fmap[dst] = data[0:i.datasize]
if i.wback:
if i.postindex: address += offset
fmap[Xn] = fmap(address)
i_STRB = i_STR
i_STRH = i_STR
i_STTR = i_STR
i_STTRB = i_STR
i_STTRH = i_STR
i_STUR = i_STR
i_STURB = i_STR
i_STURH = i_STR
def i_SMC(i,fmap):
fmap[pc] = fmap[pc]+i.length
ext('EXCEPTION.EL3 %s'%i.imm,size=pc.size).call(fmap)
def i_SVC(i,fmap):
fmap[pc] = fmap[pc]+i.length
ext('EXCEPTION.EL1 %s'%i.imm,size=pc.size).call(fmap)
def i_SYS(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_SYSL(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.t] = top(i.t.size)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_TBNZ(i,fmap):
op = fmap(i.t)
fmap[pc] = tst(op[i.bitpos:i.bitpos+1]==1, fmap[pc]+i.offset, fmap[pc]+i.length)
def i_TBZ(i,fmap):
op = fmap(i.t)
fmap[pc] = tst(op[i.bitpos:i.bitpos+1]==0, fmap[pc]+i.offset, fmap[pc]+i.length)
| gpl-2.0 | 4,208,071,343,236,591,000 | 24.957265 | 84 | 0.569114 | false |
akirk/youtube-dl | youtube_dl/extractor/folketinget.py | 92 | 2651 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_parse_qs
from ..utils import (
int_or_none,
parse_duration,
parse_iso8601,
xpath_text,
)
class FolketingetIE(InfoExtractor):
IE_DESC = 'Folketinget (ft.dk; Danish parliament)'
_VALID_URL = r'https?://(?:www\.)?ft\.dk/webtv/video/[^?#]*?\.(?P<id>[0-9]+)\.aspx'
_TEST = {
'url': 'http://www.ft.dk/webtv/video/20141/eru/td.1165642.aspx?as=1#player',
'md5': '6269e8626fa1a891bf5369b386ae996a',
'info_dict': {
'id': '1165642',
'ext': 'mp4',
'title': 'Åbent samråd i Erhvervsudvalget',
'description': 'Åbent samråd med erhvervs- og vækstministeren om regeringens politik på teleområdet',
'view_count': int,
'width': 768,
'height': 432,
'tbr': 928000,
'timestamp': 1416493800,
'upload_date': '20141120',
'duration': 3960,
},
'params': {
# rtmp download
'skip_download': True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._html_search_regex(
r'(?s)<div class="video-item-agenda"[^>]*>(.*?)<',
webpage, 'description', fatal=False)
player_params = compat_parse_qs(self._search_regex(
r'<embed src="http://ft\.arkena\.tv/flash/ftplayer\.swf\?([^"]+)"',
webpage, 'player params'))
xml_url = player_params['xml'][0]
doc = self._download_xml(xml_url, video_id)
timestamp = parse_iso8601(xpath_text(doc, './/date'))
duration = parse_duration(xpath_text(doc, './/duration'))
width = int_or_none(xpath_text(doc, './/width'))
height = int_or_none(xpath_text(doc, './/height'))
view_count = int_or_none(xpath_text(doc, './/views'))
formats = [{
'format_id': n.attrib['bitrate'],
'url': xpath_text(n, './url', fatal=True),
'tbr': int_or_none(n.attrib['bitrate']),
} for n in doc.findall('.//streams/stream')]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'timestamp': timestamp,
'width': width,
'height': height,
'duration': duration,
'view_count': view_count,
}
| unlicense | -5,576,901,220,354,292,000 | 33.337662 | 113 | 0.530257 | false |
leemac/JellyfishRss | rss/migrations/0001_initial.py | 1 | 8815 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Folder'
db.create_table(u'rss_folder', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('title', self.gf('django.db.models.fields.TextField')()),
('color', self.gf('django.db.models.fields.TextField')(max_length=20, blank=True)),
))
db.send_create_signal(u'rss', ['Folder'])
# Adding model 'Subscription'
db.create_table(u'rss_subscription', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('last_crawled', self.gf('django.db.models.fields.CharField')(max_length=200)),
('url', self.gf('django.db.models.fields.TextField')()),
('site_url', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.TextField')()),
('favicon_url', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'rss', ['Subscription'])
# Adding model 'SubscriptionUserRelation'
db.create_table(u'rss_subscriptionuserrelation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('subscription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rss.Subscription'])),
('folder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rss.Folder'])),
))
db.send_create_signal(u'rss', ['SubscriptionUserRelation'])
# Adding model 'SubscriptionItem'
db.create_table(u'rss_subscriptionitem', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content', self.gf('django.db.models.fields.TextField')()),
('published', self.gf('django.db.models.fields.DateTimeField')()),
('title', self.gf('django.db.models.fields.TextField')()),
('url', self.gf('django.db.models.fields.TextField')()),
('is_read', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_favorite', self.gf('django.db.models.fields.BooleanField')(default=False)),
('subscription', self.gf('django.db.models.fields.related.ForeignKey')(related_name='item', to=orm['rss.Subscription'])),
))
db.send_create_signal(u'rss', ['SubscriptionItem'])
def backwards(self, orm):
# Deleting model 'Folder'
db.delete_table(u'rss_folder')
# Deleting model 'Subscription'
db.delete_table(u'rss_subscription')
# Deleting model 'SubscriptionUserRelation'
db.delete_table(u'rss_subscriptionuserrelation')
# Deleting model 'SubscriptionItem'
db.delete_table(u'rss_subscriptionitem')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rss.folder': {
'Meta': {'object_name': 'Folder'},
'color': ('django.db.models.fields.TextField', [], {'max_length': '20', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'rss.subscription': {
'Meta': {'object_name': 'Subscription'},
'color': ('django.db.models.fields.TextField', [], {'max_length': '20', 'blank': 'True'}),
'favicon_url': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_crawled': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site_url': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
u'rss.subscriptionitem': {
'Meta': {'object_name': 'SubscriptionItem'},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'published': ('django.db.models.fields.DateTimeField', [], {}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'item'", 'to': u"orm['rss.Subscription']"}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
u'rss.subscriptionuserrelation': {
'Meta': {'object_name': 'SubscriptionUserRelation'},
'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rss.Folder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rss.Subscription']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['rss'] | mit | -3,279,557,934,306,948,000 | 61.084507 | 195 | 0.568009 | false |
evilpie/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treebuilders/dom.py | 920 | 8469 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList(object):
def __init__(self, element):
self.element = element
def __iter__(self):
return list(self.element.attributes.items()).__iter__()
def __setitem__(self, name, value):
self.element.setAttribute(name, value)
def __len__(self):
return len(list(self.element.attributes.items()))
def items(self):
return [(item[0], item[1]) for item in
list(self.element.attributes.items())]
def keys(self):
return list(self.element.attributes.keys())
def __getitem__(self, name):
return self.element.getAttribute(name)
def __contains__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.hasAttribute(name)
class NodeBuilder(_base.Node):
def __init__(self, element):
_base.Node.__init__(self, element.nodeName)
self.element = element
namespace = property(lambda self: hasattr(self.element, "namespaceURI")
and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" + name[1])
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(_base.TreeBuilder):
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
data = data
if parent != self:
_base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
if Node.TEXT_NODE not in self.dom._child_node_types:
self.dom._child_node_types = list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
implementation = DomImplementation
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
(' ' * indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
else:
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI is not None):
name = "%s %s" % (constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>" % (' ' * indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s" % (constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
return locals()
# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
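# A minimal usage sketch (assumed call pattern, not taken from this file):
#
#   from xml.dom import minidom
#   dom_module = getDomModule(minidom)
#   builder = dom_module.TreeBuilder(namespaceHTMLElements=True)
#
# html5lib itself normally reaches this code through
# html5lib.treebuilders.getTreeBuilder("dom").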
| mpl-2.0 | 1,695,116,534,286,119,700 | 36.30837 | 85 | 0.531822 | false |
guarddogofww/cs108test | src/jarabe/model/shell.py | 3 | 28033 | # Copyright (C) 2006-2007 Owen Williams.
# Copyright (C) 2006-2008 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import time
from gi.repository import Gio
from gi.repository import Wnck
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkX11
from gi.repository import GLib
import dbus
from sugar3 import dispatch
from sugar3 import profile
from gi.repository import SugarExt
from jarabe.model.bundleregistry import get_registry
_SERVICE_NAME = 'org.laptop.Activity'
_SERVICE_PATH = '/org/laptop/Activity'
_SERVICE_INTERFACE = 'org.laptop.Activity'
_model = None
class Activity(GObject.GObject):
"""Activity which appears in the "Home View" of the Sugar shell
This class stores the Sugar Shell's metadata regarding a
given activity/application in the system. It interacts with
the sugar3.activity.* modules extensively in order to
accomplish its tasks.
"""
__gtype_name__ = 'SugarHomeActivity'
__gsignals__ = {
'pause': (GObject.SignalFlags.RUN_FIRST, None, ([])),
'resume': (GObject.SignalFlags.RUN_FIRST, None, ([])),
'stop': (GObject.SignalFlags.RUN_LAST, GObject.TYPE_BOOLEAN, ([])),
}
LAUNCHING = 0
LAUNCH_FAILED = 1
LAUNCHED = 2
def __init__(self, activity_info, activity_id, color, window=None):
"""Initialise the HomeActivity
activity_info -- sugar3.activity.registry.ActivityInfo instance,
provides the information required to actually
create the new instance. This is, in effect,
the "type" of activity being created.
activity_id -- unique identifier for this instance
of the activity type
_windows -- WnckWindows registered for the activity. The lowest
one in the stack is the main window.
"""
GObject.GObject.__init__(self)
self._windows = []
self._service = None
self._shell_windows = []
self._activity_id = activity_id
self._activity_info = activity_info
self._launch_time = time.time()
self._launch_status = Activity.LAUNCHING
if color is not None:
self._color = color
else:
self._color = profile.get_color()
if window is not None:
self.add_window(window)
self._retrieve_service()
self._name_owner_changed_handler = None
if not self._service:
bus = dbus.SessionBus()
self._name_owner_changed_handler = bus.add_signal_receiver(
self._name_owner_changed_cb,
signal_name='NameOwnerChanged',
dbus_interface='org.freedesktop.DBus')
self._launch_completed_hid = \
get_model().connect('launch-completed',
self.__launch_completed_cb)
self._launch_failed_hid = get_model().connect('launch-failed',
self.__launch_failed_cb)
def get_launch_status(self):
return self._launch_status
launch_status = GObject.property(getter=get_launch_status)
def add_window(self, window, is_main_window=False):
"""Add a window to the windows stack."""
if not window:
raise ValueError('window must be valid')
self._windows.append(window)
if is_main_window:
window.connect('state-changed', self._state_changed_cb)
def push_shell_window(self, window):
"""Attach a shell run window (eg. view source) to the activity."""
self._shell_windows.append(window)
def pop_shell_window(self, window):
"""
        Detach a shell run window (eg. view source) from the activity.
Only call this on **user initiated** deletion (loop issue).
"""
self._shell_windows.remove(window)
def has_shell_window(self):
return bool(self._shell_windows)
def stop(self):
# For web activities the Apisocket will connect to the 'stop'
# signal, thus preventing the window close. Then, on the
# 'activity.close' method, it will call close_window()
# directly.
close_window = not self.emit('stop')
if close_window:
self.close_window()
def close_window(self):
if self.get_window() is not None:
self.get_window().close(GLib.get_current_time())
for w in self._shell_windows:
w.destroy()
def remove_window_by_xid(self, xid):
"""Remove a window from the windows stack."""
for wnd in self._windows:
if wnd.get_xid() == xid:
self._windows.remove(wnd)
return True
return False
def get_service(self):
"""Get the activity service
Note that non-native Sugar applications will not have
such a service, so the return value will be None in
those cases.
"""
return self._service
def get_title(self):
"""Retrieve the application's root window's suggested title"""
if self._windows:
return self._windows[0].get_name()
else:
return None
def get_icon_path(self):
"""Retrieve the activity's icon (file) name"""
if self.is_journal():
icon_theme = Gtk.IconTheme.get_default()
info = icon_theme.lookup_icon('activity-journal',
Gtk.IconSize.SMALL_TOOLBAR, 0)
if not info:
return None
fname = info.get_filename()
del info
return fname
elif self._activity_info:
return self._activity_info.get_icon()
else:
return None
def get_icon_color(self):
"""Retrieve the appropriate icon colour for this activity
        Uses activity_id to index into the PresenceService's
        set of activity colours; if the PresenceService does not
        have an entry (implying that this is not a Sugar-shared application),
        the local user's profile colour is used for the icon.
"""
return self._color
def get_activity_id(self):
"""Retrieve the "activity_id" passed in to our constructor
This is a "globally likely unique" identifier generated by
sugar3.util.unique_id
"""
return self._activity_id
def get_bundle_id(self):
""" Returns the activity's bundle id"""
if self._activity_info is None:
return None
else:
return self._activity_info.get_bundle_id()
def get_xid(self):
"""Retrieve the X-windows ID of our root window"""
if self._windows:
return self._windows[0].get_xid()
else:
return None
def has_xid(self, xid):
"""Check if an X-window with the given xid is in the windows stack"""
if self._windows:
for wnd in self._windows:
if wnd.get_xid() == xid:
return True
return False
def get_window(self):
"""Retrieve the X-windows root window of this application
This was stored by the add_window method, which was
called by HomeModel._add_activity, which was called
via a callback that looks for all 'window-opened'
events.
        We keep a stack of the windows. The lowest window in the
        stack that is still valid is considered the main one.
HomeModel currently uses a dbus service query on the
activity to determine to which HomeActivity the newly
launched window belongs.
"""
if self._windows:
return self._windows[0]
return None
def get_type(self):
"""Retrieve the activity bundle id for future reference"""
if not self._windows:
return None
else:
return SugarExt.wm_get_bundle_id(self._windows[0].get_xid())
def is_journal(self):
"""Returns boolean if the activity is of type JournalActivity"""
return self.get_type() == 'org.laptop.JournalActivity'
def get_launch_time(self):
"""Return the time at which the activity was first launched
Format is floating-point time.time() value
(seconds since the epoch)
"""
return self._launch_time
def get_pid(self):
"""Returns the activity's PID"""
if not self._windows:
return None
return self._windows[0].get_pid()
def get_bundle_path(self):
"""Returns the activity's bundle directory"""
if self._activity_info is None:
return None
else:
return self._activity_info.get_path()
def get_activity_name(self):
"""Returns the activity's bundle name"""
if self._activity_info is None:
return None
else:
return self._activity_info.get_name()
def equals(self, activity):
if self._activity_id and activity.get_activity_id():
return self._activity_id == activity.get_activity_id()
if self._windows[0].get_xid() and activity.get_xid():
return self._windows[0].get_xid() == activity.get_xid()
return False
def _get_service_name(self):
if self._activity_id:
return _SERVICE_NAME + self._activity_id
else:
return None
def _retrieve_service(self):
if not self._activity_id:
return
try:
bus = dbus.SessionBus()
proxy = bus.get_object(self._get_service_name(),
_SERVICE_PATH + '/' + self._activity_id)
self._service = dbus.Interface(proxy, _SERVICE_INTERFACE)
except dbus.DBusException:
self._service = None
def _name_owner_changed_cb(self, name, old, new):
if name == self._get_service_name():
if old and not new:
logging.debug('Activity._name_owner_changed_cb: '
'activity %s went away', name)
self._name_owner_changed_handler.remove()
self._name_owner_changed_handler = None
self._service = None
elif not old and new:
logging.debug('Activity._name_owner_changed_cb: '
'activity %s started up', name)
self._retrieve_service()
self.set_active(True)
def set_active(self, state):
"""Propagate the current state to the activity object"""
if self._service is not None:
self._service.SetActive(state,
reply_handler=self._set_active_success,
error_handler=self._set_active_error)
def _set_active_success(self):
pass
def _set_active_error(self, err):
logging.error('set_active() failed: %s', err)
def _set_launch_status(self, value):
get_model().disconnect(self._launch_completed_hid)
get_model().disconnect(self._launch_failed_hid)
self._launch_completed_hid = None
self._launch_failed_hid = None
self._launch_status = value
self.notify('launch_status')
def __launch_completed_cb(self, model, home_activity):
if home_activity is self:
self._set_launch_status(Activity.LAUNCHED)
def __launch_failed_cb(self, model, home_activity):
if home_activity is self:
self._set_launch_status(Activity.LAUNCH_FAILED)
def _state_changed_cb(self, main_window, changed_mask, new_state):
if changed_mask & Wnck.WindowState.MINIMIZED:
if new_state & Wnck.WindowState.MINIMIZED:
self.emit('pause')
else:
self.emit('resume')
class ShellModel(GObject.GObject):
"""Model of the shell (activity management)
The ShellModel is basically the point of registration
for all running activities within Sugar. It traps
events that tell the system there is a new activity
being created (generated by the activity factories),
or removed, as well as those which tell us that the
currently focussed activity has changed.
The HomeModel tracks a set of HomeActivity instances,
which are tracking the window to activity mappings
the activity factories have set up.
"""
__gsignals__ = {
'activity-added': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'activity-removed': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'active-activity-changed': (GObject.SignalFlags.RUN_FIRST,
None,
([GObject.TYPE_PYOBJECT])),
'tabbing-activity-changed': (GObject.SignalFlags.RUN_FIRST,
None,
([GObject.TYPE_PYOBJECT])),
'launch-started': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'launch-completed': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'launch-failed': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
}
ZOOM_MESH = 0
ZOOM_GROUP = 1
ZOOM_HOME = 2
ZOOM_ACTIVITY = 3
def __init__(self):
GObject.GObject.__init__(self)
self._screen = Wnck.Screen.get_default()
self._screen.connect('window-opened', self._window_opened_cb)
self._screen.connect('window-closed', self._window_closed_cb)
self._screen.connect('active-window-changed',
self._active_window_changed_cb)
self.zoom_level_changed = dispatch.Signal()
self._desktop_level = self.ZOOM_HOME
self._zoom_level = self.ZOOM_HOME
self._current_activity = None
self._activities = []
self._shared_activities = {}
self._active_activity = None
self._tabbing_activity = None
self._launchers = {}
self._modal_dialogs_counter = 0
self._screen.toggle_showing_desktop(True)
settings = Gio.Settings('org.sugarlabs')
self._maximum_open_activities = settings.get_int(
'maximum-number-of-open-activities')
self._launch_timers = {}
def get_launcher(self, activity_id):
return self._launchers.get(str(activity_id))
def register_launcher(self, activity_id, launcher):
self._launchers[activity_id] = launcher
def unregister_launcher(self, activity_id):
if activity_id in self._launchers:
del self._launchers[activity_id]
def _update_zoom_level(self, window):
if window.get_window_type() == Wnck.WindowType.DIALOG:
return
elif window.get_window_type() == Wnck.WindowType.NORMAL:
new_level = self.ZOOM_ACTIVITY
else:
new_level = self._desktop_level
if self._zoom_level != new_level:
old_level = self._zoom_level
self._zoom_level = new_level
self.zoom_level_changed.send(self, old_level=old_level,
new_level=new_level)
def set_zoom_level(self, new_level, x_event_time=0):
old_level = self.zoom_level
if old_level == new_level:
return
self._zoom_level = new_level
if new_level is not self.ZOOM_ACTIVITY:
self._desktop_level = new_level
self.zoom_level_changed.send(self, old_level=old_level,
new_level=new_level)
show_desktop = new_level is not self.ZOOM_ACTIVITY
self._screen.toggle_showing_desktop(show_desktop)
if new_level is self.ZOOM_ACTIVITY:
# activate the window, in case it was iconified
# (e.g. during sugar launch, the Journal starts in this state)
window = self._active_activity.get_window()
if window:
window.activate(x_event_time or Gtk.get_current_event_time())
def _get_zoom_level(self):
return self._zoom_level
zoom_level = property(_get_zoom_level)
def _get_activities_with_window(self):
ret = []
for i in self._activities:
if i.get_window() is not None:
ret.append(i)
return ret
def get_previous_activity(self, current=None):
if not current:
current = self._active_activity
activities = self._get_activities_with_window()
i = activities.index(current)
if len(activities) == 0:
return None
elif i - 1 >= 0:
return activities[i - 1]
else:
return activities[len(activities) - 1]
def get_next_activity(self, current=None):
if not current:
current = self._active_activity
activities = self._get_activities_with_window()
i = activities.index(current)
if len(activities) == 0:
return None
elif i + 1 < len(activities):
return activities[i + 1]
else:
return activities[0]
def get_active_activity(self):
"""Returns the activity that the user is currently working in"""
return self._active_activity
def add_shared_activity(self, activity_id, color):
self._shared_activities[activity_id] = color
def remove_shared_activity(self, activity_id):
del self._shared_activities[activity_id]
def get_tabbing_activity(self):
"""Returns the activity that is currently highlighted during tabbing"""
return self._tabbing_activity
def set_tabbing_activity(self, activity):
"""Sets the activity that is currently highlighted during tabbing"""
self._tabbing_activity = activity
self.emit('tabbing-activity-changed', self._tabbing_activity)
def _set_active_activity(self, home_activity):
if self._active_activity == home_activity:
return
if home_activity:
home_activity.set_active(True)
if self._active_activity:
self._active_activity.set_active(False)
self._active_activity = home_activity
self.emit('active-activity-changed', self._active_activity)
def __iter__(self):
return iter(self._activities)
def __len__(self):
return len(self._activities)
def __getitem__(self, i):
return self._activities[i]
def index(self, obj):
return self._activities.index(obj)
def _window_opened_cb(self, screen, window):
"""Handle the callback for the 'window opened' event.
Most activities will register 2 windows during
their lifetime: the launcher window, and the 'main'
app window.
When the main window appears, we send a signal to
the launcher window to close.
Some activities (notably non-native apps) open several
windows during their lifetime, switching from one to
the next as the 'main' window. We use a stack to track
them.
"""
if window.get_window_type() == Wnck.WindowType.NORMAL or \
window.get_window_type() == Wnck.WindowType.SPLASHSCREEN:
home_activity = None
xid = window.get_xid()
activity_id = SugarExt.wm_get_activity_id(xid)
service_name = SugarExt.wm_get_bundle_id(xid)
if service_name:
registry = get_registry()
activity_info = registry.get_bundle(service_name)
else:
activity_info = None
if activity_id:
home_activity = self.get_activity_by_id(activity_id)
display = Gdk.Display.get_default()
gdk_window = GdkX11.X11Window.foreign_new_for_display(display,
xid)
gdk_window.set_decorations(0)
window.maximize()
def is_main_window(window, home_activity):
# Check if window is the 'main' app window, not the
# launcher window.
return window.get_window_type() != \
Wnck.WindowType.SPLASHSCREEN and \
home_activity.get_launch_status() == Activity.LAUNCHING
if home_activity is None and \
window.get_window_type() == Wnck.WindowType.NORMAL:
# This is a special case for the Journal
                # We check that it is not a splash screen to avoid #4767
logging.debug('first window registered for %s', activity_id)
color = self._shared_activities.get(activity_id, None)
home_activity = Activity(activity_info, activity_id,
color, window)
self._add_activity(home_activity)
else:
logging.debug('window registered for %s', activity_id)
home_activity.add_window(window, is_main_window(window,
home_activity))
if is_main_window(window, home_activity):
self.emit('launch-completed', home_activity)
startup_time = time.time() - home_activity.get_launch_time()
logging.debug('%s launched in %f seconds.',
activity_id, startup_time)
if self._active_activity is None:
self._set_active_activity(home_activity)
def _window_closed_cb(self, screen, window):
if window.get_window_type() == Wnck.WindowType.NORMAL or \
window.get_window_type() == Wnck.WindowType.SPLASHSCREEN:
xid = window.get_xid()
activity = self._get_activity_by_xid(xid)
if activity is not None:
activity.remove_window_by_xid(xid)
if activity.get_window() is None:
logging.debug('last window gone - remove activity %s',
activity)
activity.close_window()
self._remove_activity(activity)
def _get_activity_by_xid(self, xid):
for home_activity in self._activities:
if home_activity.has_xid(xid):
return home_activity
return None
def get_activity_by_id(self, activity_id):
for home_activity in self._activities:
if home_activity.get_activity_id() == activity_id:
return home_activity
return None
def _active_window_changed_cb(self, screen, previous_window=None):
window = screen.get_active_window()
if window is None:
return
if window.get_window_type() != Wnck.WindowType.DIALOG:
while window.get_transient() is not None:
window = window.get_transient()
act = self._get_activity_by_xid(window.get_xid())
if act is not None:
self._set_active_activity(act)
self._update_zoom_level(window)
def get_name_from_bundle_id(self, bundle_id):
for activity in self._get_activities_with_window():
if activity.get_bundle_id() == bundle_id:
return activity.get_activity_name()
return ''
def can_launch_activity_instance(self, bundle):
if bundle.get_single_instance():
bundle_id = bundle.get_bundle_id()
for activity in self._get_activities_with_window():
if activity.get_bundle_id() == bundle_id:
return False
return True
def can_launch_activity(self):
activities = self._get_activities_with_window()
if self._maximum_open_activities > 0 and \
len(activities) > self._maximum_open_activities:
return False
else:
return True
def _add_activity(self, home_activity):
self._activities.append(home_activity)
self.emit('activity-added', home_activity)
def _remove_activity(self, home_activity):
if home_activity == self._active_activity:
windows = Wnck.Screen.get_default().get_windows_stacked()
windows.reverse()
for window in windows:
new_activity = self._get_activity_by_xid(window.get_xid())
if new_activity is not None:
self._set_active_activity(new_activity)
break
else:
logging.error('No activities are running')
self._set_active_activity(None)
self.emit('activity-removed', home_activity)
self._activities.remove(home_activity)
def notify_launch(self, activity_id, service_name):
registry = get_registry()
activity_info = registry.get_bundle(service_name)
if not activity_info:
raise ValueError("Activity service name '%s'"
" was not found in the bundle registry."
% service_name)
color = self._shared_activities.get(activity_id, None)
home_activity = Activity(activity_info, activity_id, color)
self._add_activity(home_activity)
self._set_active_activity(home_activity)
self.emit('launch-started', home_activity)
if activity_id in self._launch_timers:
GObject.source_remove(self._launch_timers[activity_id])
del self._launch_timers[activity_id]
timer = GObject.timeout_add_seconds(90, self._check_activity_launched,
activity_id)
self._launch_timers[activity_id] = timer
def notify_launch_failed(self, activity_id):
home_activity = self.get_activity_by_id(activity_id)
if home_activity:
logging.debug('Activity %s (%s) launch failed', activity_id,
home_activity.get_type())
if self.get_launcher(activity_id) is not None:
self.emit('launch-failed', home_activity)
else:
# activity sent failure notification after closing launcher
self._remove_activity(home_activity)
else:
logging.error('Model for activity id %s does not exist.',
activity_id)
def _check_activity_launched(self, activity_id):
del self._launch_timers[activity_id]
home_activity = self.get_activity_by_id(activity_id)
if not home_activity:
logging.debug('Activity %s has been closed already.', activity_id)
return False
if self.get_launcher(activity_id) is not None:
logging.debug('Activity %s still launching, assuming it failed.',
activity_id)
self.notify_launch_failed(activity_id)
return False
def push_modal(self):
self._modal_dialogs_counter += 1
def pop_modal(self):
self._modal_dialogs_counter -= 1
def has_modal(self):
return self._modal_dialogs_counter > 0
def get_model():
global _model
if _model is None:
_model = ShellModel()
return _model
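# A minimal usage sketch; the import path and the handler body are assumptions
# based on this module living at jarabe/model/shell.py:
#
#   from jarabe.model import shell
#   model = shell.get_model()
#   model.connect('activity-added', lambda model, home_activity: None)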
| gpl-3.0 | -8,484,930,277,310,287,000 | 34.802043 | 79 | 0.582991 | false |
cwisecarver/osf.io | addons/dropbox/migrations/0001_initial.py | 28 | 1508 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-23 20:34
from __future__ import unicode_literals
from django.db import migrations, models
import osf.models.base
import osf.utils.datetime_aware_jsonfield
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NodeSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('folder', models.TextField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UserSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('oauth_grants', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(blank=True, default=dict)),
],
options={
'abstract': False,
},
),
]
| apache-2.0 | 8,273,435,826,323,470,000 | 34.904762 | 129 | 0.574271 | false |
nuagenetworks/vspk-python | vspk/v5_0/numacfilterprofile.py | 1 | 7688 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUMACFilterProfile(NURESTObject):
""" Represents a MACFilterProfile in the VSD
Notes:
7x50 MAC Filter profile
"""
__rest_name__ = "macfilterprofile"
__resource_name__ = "macfilterprofiles"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a MACFilterProfile instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> macfilterprofile = NUMACFilterProfile(id=u'xxxx-xxx-xxx-xxx', name=u'MACFilterProfile')
>>> macfilterprofile = NUMACFilterProfile(data=my_dict)
"""
super(NUMACFilterProfile, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._description = None
self._entity_scope = None
self._assoc_entity_type = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
A unique name of the MAC Profile entity.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
A unique name of the MAC Profile entity.
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def description(self):
""" Get description value.
Notes:
A detailed description of the MAC Profile entity.
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A detailed description of the MAC Profile entity.
"""
self._description = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def assoc_entity_type(self):
""" Get assoc_entity_type value.
Notes:
Type of parent entity
This attribute is named `assocEntityType` in VSD API.
"""
return self._assoc_entity_type
@assoc_entity_type.setter
def assoc_entity_type(self, value):
""" Set assoc_entity_type value.
Notes:
Type of parent entity
This attribute is named `assocEntityType` in VSD API.
"""
self._assoc_entity_type = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| bsd-3-clause | 2,725,360,634,292,582,000 | 28.802326 | 175 | 0.596124 | false |
teeple/pns_server | work/install/Python-2.7.4/Lib/lib2to3/fixes/fix_itertools.py | 148 | 1549 | """ Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
    imports from itertools are fixed in fix_itertools_imports.py
If itertools is imported as something else (ie: import itertools as it;
it.izip(spam, eggs)) method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if ('it' in results and
func.value not in (u'ifilterfalse', u'izip_longest')):
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.prefix
it.remove()
            # Replace the node which contains ('.', 'function') with the
            # function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.prefix
func.replace(Name(func.value[1:], prefix=prefix))
| gpl-2.0 | 7,372,252,283,408,376,000 | 35.023256 | 77 | 0.567463 | false |
benchisell/photostream-bc | flask/lib/python2.7/site-packages/migrate/versioning/schemadiff.py | 52 | 8741 | """
Schema differencing support.
"""
import logging
import sqlalchemy
from sqlalchemy.types import Float
log = logging.getLogger(__name__)
def getDiffOfModelAgainstDatabase(metadata, engine, excludeTables=None):
"""
Return differences of model against database.
:return: object which will evaluate to :keyword:`True` if there \
are differences else :keyword:`False`.
"""
db_metadata = sqlalchemy.MetaData(engine, reflect=True)
# sqlite will include a dynamically generated 'sqlite_sequence' table if
# there are autoincrement sequences in the database; this should not be
# compared.
if engine.dialect.name == 'sqlite':
if 'sqlite_sequence' in db_metadata.tables:
db_metadata.remove(db_metadata.tables['sqlite_sequence'])
return SchemaDiff(metadata, db_metadata,
labelA='model',
labelB='database',
excludeTables=excludeTables)
def getDiffOfModelAgainstModel(metadataA, metadataB, excludeTables=None):
"""
Return differences of model against another model.
:return: object which will evaluate to :keyword:`True` if there \
are differences else :keyword:`False`.
"""
    return SchemaDiff(metadataA, metadataB, excludeTables=excludeTables)
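# A minimal usage sketch (object names are placeholders, not from this module):
#
#   diff = getDiffOfModelAgainstDatabase(model_meta, engine,
#                                        excludeTables=['migrate_version'])
#   if diff:                      # truthy only when differences were found
#       print diff                # multi-line summary from SchemaDiff.__str__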
class ColDiff(object):
"""
Container for differences in one :class:`~sqlalchemy.schema.Column`
between two :class:`~sqlalchemy.schema.Table` instances, ``A``
and ``B``.
.. attribute:: col_A
The :class:`~sqlalchemy.schema.Column` object for A.
.. attribute:: col_B
The :class:`~sqlalchemy.schema.Column` object for B.
.. attribute:: type_A
The most generic type of the :class:`~sqlalchemy.schema.Column`
object in A.
.. attribute:: type_B
The most generic type of the :class:`~sqlalchemy.schema.Column`
object in A.
"""
diff = False
def __init__(self,col_A,col_B):
self.col_A = col_A
self.col_B = col_B
self.type_A = col_A.type
self.type_B = col_B.type
self.affinity_A = self.type_A._type_affinity
self.affinity_B = self.type_B._type_affinity
if self.affinity_A is not self.affinity_B:
self.diff = True
return
if isinstance(self.type_A,Float) or isinstance(self.type_B,Float):
if not (isinstance(self.type_A,Float) and isinstance(self.type_B,Float)):
self.diff=True
return
for attr in ('precision','scale','length'):
A = getattr(self.type_A,attr,None)
B = getattr(self.type_B,attr,None)
if not (A is None or B is None) and A!=B:
self.diff=True
return
def __nonzero__(self):
return self.diff
class TableDiff(object):
"""
Container for differences in one :class:`~sqlalchemy.schema.Table`
between two :class:`~sqlalchemy.schema.MetaData` instances, ``A``
and ``B``.
.. attribute:: columns_missing_from_A
A sequence of column names that were found in B but weren't in
A.
.. attribute:: columns_missing_from_B
A sequence of column names that were found in A but weren't in
B.
.. attribute:: columns_different
A dictionary containing information about columns that were
found to be different.
It maps column names to a :class:`ColDiff` objects describing the
differences found.
"""
__slots__ = (
'columns_missing_from_A',
'columns_missing_from_B',
'columns_different',
)
def __nonzero__(self):
return bool(
self.columns_missing_from_A or
self.columns_missing_from_B or
self.columns_different
)
class SchemaDiff(object):
"""
Compute the difference between two :class:`~sqlalchemy.schema.MetaData`
objects.
The string representation of a :class:`SchemaDiff` will summarise
the changes found between the two
:class:`~sqlalchemy.schema.MetaData` objects.
The length of a :class:`SchemaDiff` will give the number of
changes found, enabling it to be used much like a boolean in
expressions.
:param metadataA:
First :class:`~sqlalchemy.schema.MetaData` to compare.
:param metadataB:
Second :class:`~sqlalchemy.schema.MetaData` to compare.
:param labelA:
The label to use in messages about the first
:class:`~sqlalchemy.schema.MetaData`.
:param labelB:
The label to use in messages about the second
:class:`~sqlalchemy.schema.MetaData`.
:param excludeTables:
A sequence of table names to exclude.
.. attribute:: tables_missing_from_A
A sequence of table names that were found in B but weren't in
A.
.. attribute:: tables_missing_from_B
A sequence of table names that were found in A but weren't in
B.
.. attribute:: tables_different
A dictionary containing information about tables that were found
to be different.
It maps table names to a :class:`TableDiff` objects describing the
differences found.
"""
def __init__(self,
metadataA, metadataB,
labelA='metadataA',
labelB='metadataB',
excludeTables=None):
self.metadataA, self.metadataB = metadataA, metadataB
self.labelA, self.labelB = labelA, labelB
self.label_width = max(len(labelA),len(labelB))
excludeTables = set(excludeTables or [])
A_table_names = set(metadataA.tables.keys())
B_table_names = set(metadataB.tables.keys())
self.tables_missing_from_A = sorted(
B_table_names - A_table_names - excludeTables
)
self.tables_missing_from_B = sorted(
A_table_names - B_table_names - excludeTables
)
self.tables_different = {}
for table_name in A_table_names.intersection(B_table_names):
td = TableDiff()
A_table = metadataA.tables[table_name]
B_table = metadataB.tables[table_name]
A_column_names = set(A_table.columns.keys())
B_column_names = set(B_table.columns.keys())
td.columns_missing_from_A = sorted(
B_column_names - A_column_names
)
td.columns_missing_from_B = sorted(
A_column_names - B_column_names
)
td.columns_different = {}
for col_name in A_column_names.intersection(B_column_names):
cd = ColDiff(
A_table.columns.get(col_name),
B_table.columns.get(col_name)
)
if cd:
td.columns_different[col_name]=cd
# XXX - index and constraint differences should
# be checked for here
if td:
self.tables_different[table_name]=td
def __str__(self):
''' Summarize differences. '''
out = []
        column_template = ' %%%is: %%r' % self.label_width
for names,label in (
(self.tables_missing_from_A,self.labelA),
(self.tables_missing_from_B,self.labelB),
):
if names:
out.append(
' tables missing from %s: %s' % (
label,', '.join(sorted(names))
)
)
for name,td in sorted(self.tables_different.items()):
out.append(
' table with differences: %s' % name
)
for names,label in (
(td.columns_missing_from_A,self.labelA),
(td.columns_missing_from_B,self.labelB),
):
if names:
out.append(
' %s missing these columns: %s' % (
label,', '.join(sorted(names))
)
)
for name,cd in td.columns_different.items():
out.append(' column with differences: %s' % name)
out.append(column_template % (self.labelA,cd.col_A))
out.append(column_template % (self.labelB,cd.col_B))
if out:
out.insert(0, 'Schema diffs:')
return '\n'.join(out)
else:
return 'No schema diffs'
def __len__(self):
"""
Used in bool evaluation, return of 0 means no diffs.
"""
return (
len(self.tables_missing_from_A) +
len(self.tables_missing_from_B) +
len(self.tables_different)
)
| bsd-3-clause | -7,693,069,572,257,641,000 | 28.934932 | 85 | 0.569386 | false |
bguillot/OpenUpgrade | addons/sale/sale.py | 17 | 67360 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
from openerp import workflow
class sale_order(osv.osv):
_name = "sale.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Order"
_track = {
'state': {
'sale.mt_order_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state in ['manual'],
'sale.mt_order_sent': lambda self, cr, uid, obj, ctx=None: obj.state in ['sent']
},
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'date_order': fields.datetime.now(),
'state': 'draft',
'invoice_ids': [],
'date_confirm': False,
'client_order_ref': '',
'name': self.pool.get('ir.sequence').get(cr, uid, 'sale.order'),
'procurement_group_id': False,
})
return super(sale_order, self).copy(cr, uid, id, default, context=context)
def _amount_line_tax(self, cr, uid, line, context=None):
val = 0.0
for c in self.pool.get('account.tax').compute_all(cr, uid, line.tax_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.product_uom_qty, line.product_id, line.order_id.partner_id)['taxes']:
val += c.get('amount', 0.0)
return val
def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
""" Wrapper because of direct method passing as parameter for function fields """
return self._amount_all(cr, uid, ids, field_name, arg, context=context)
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
cur_obj = self.pool.get('res.currency')
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
val += self._amount_line_tax(cr, uid, line, context=context)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
if sale.invoiced:
res[sale.id] = 100.0
continue
tot = 0.0
for invoice in sale.invoice_ids:
if invoice.state not in ('draft', 'cancel'):
tot += invoice.amount_untaxed
if tot:
res[sale.id] = min(100.0, tot * 100.0 / (sale.amount_untaxed or 1.00))
else:
res[sale.id] = 0.0
return res
def _invoice_exists(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = False
if sale.invoice_ids:
res[sale.id] = True
return res
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = True
invoice_existence = False
for invoice in sale.invoice_ids:
if invoice.state!='cancel':
invoice_existence = True
if invoice.state != 'paid':
res[sale.id] = False
break
if not invoice_existence or sale.state == 'manual':
res[sale.id] = False
return res
def _invoiced_search(self, cursor, user, obj, name, args, context=None):
if not len(args):
return []
clause = ''
sale_clause = ''
no_invoiced = False
for arg in args:
if arg[1] == '=':
if arg[2]:
clause += 'AND inv.state = \'paid\''
else:
clause += 'AND inv.state != \'cancel\' AND sale.state != \'cancel\' AND inv.state <> \'paid\' AND rel.order_id = sale.id '
sale_clause = ', sale_order AS sale '
no_invoiced = True
cursor.execute('SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel, account_invoice AS inv '+ sale_clause + \
'WHERE rel.invoice_id = inv.id ' + clause)
res = cursor.fetchall()
if no_invoiced:
cursor.execute('SELECT sale.id ' \
'FROM sale_order AS sale ' \
'WHERE sale.id NOT IN ' \
'(SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel) and sale.state != \'cancel\'')
res.extend(cursor.fetchall())
if not res:
return [('id', '=', 0)]
return [('id', 'in', [x[0] for x in res])]
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'name': fields.char('Order Reference', size=64, required=True,
readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True),
'origin': fields.char('Source Document', size=64, help="Reference of the document that generated this sales order request."),
'client_order_ref': fields.char('Reference/Description', size=64),
'state': fields.selection([
('draft', 'Draft Quotation'),
('sent', 'Quotation Sent'),
('cancel', 'Cancelled'),
('waiting_date', 'Waiting Schedule'),
('progress', 'Sales Order'),
('manual', 'Sale to Invoice'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
], 'Status', readonly=True, help="Gives the status of the quotation or sales order.\
\nThe exception status is automatically set when a cancel operation occurs \
in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
but waiting for the scheduler to run on the order date.", select=True),
'date_order': fields.datetime('Date', required=True, readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'create_date': fields.datetime('Creation Date', readonly=True, select=True, help="Date on which sales order is created."),
'date_confirm': fields.date('Confirmation Date', readonly=True, select=True, help="Date on which sales order is confirmed."),
'user_id': fields.many2one('res.users', 'Salesperson', states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, select=True, track_visibility='always'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order."),
'partner_shipping_id': fields.many2one('res.partner', 'Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order."),
'order_policy': fields.selection([
('manual', 'On Demand'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""This field controls how invoice and delivery operations are synchronized."""),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency", readonly=True, required=True),
'project_id': fields.many2one('account.analytic.account', 'Contract / Analytic', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order."),
'order_line': fields.one2many('sale.order.line', 'order_id', 'Order Lines', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'invoice_ids': fields.many2many('account.invoice', 'sale_order_invoice_rel', 'order_id', 'invoice_id', 'Invoices', readonly=True, help="This is the list of invoices that have been generated for this sales order. The same sales order may have been invoiced in several times (by line for example)."),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Paid',
fnct_search=_invoiced_search, type='boolean', help="It indicates that an invoice has been paid."),
'invoice_exists': fields.function(_invoice_exists, string='Invoiced',
fnct_search=_invoiced_search, type='boolean', help="It indicates that sales order has at least one invoice."),
'note': fields.text('Terms and conditions'),
'amount_untaxed': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The amount without tax.", track_visibility='always'),
'amount_tax': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The tax amount."),
'amount_total': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Total',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The total amount."),
'payment_term': fields.many2one('account.payment.term', 'Payment Term'),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'company_id': fields.many2one('res.company', 'Company'),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'procurement_group_id': fields.many2one('procurement.group', 'Procurement group'),
}
_defaults = {
'date_order': fields.datetime.now,
'order_policy': 'manual',
'company_id': _get_default_company,
'state': 'draft',
'user_id': lambda obj, cr, uid, context: uid,
'name': lambda obj, cr, uid, context: '/',
'partner_invoice_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['invoice'])['invoice'],
'partner_shipping_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['delivery'])['delivery'],
'note': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.sale_note,
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_order = 'date_order desc, id desc'
# Form filling
def unlink(self, cr, uid, ids, context=None):
sale_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in sale_orders:
if s['state'] in ['draft', 'cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a confirmed sales order, you must cancel it before!'))
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def copy_quotation(self, cr, uid, ids, context=None):
id = self.copy(cr, uid, ids[0], context=None)
view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
view_id = view_ref and view_ref[1] or False,
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': id,
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def onchange_pricelist_id(self, cr, uid, ids, pricelist_id, order_lines, context=None):
context = context or {}
if not pricelist_id:
return {}
value = {
'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id
}
if not order_lines:
return {'value': value}
warning = {
'title': _('Pricelist Warning!'),
'message' : _('If you change the pricelist of this order (and eventually the currency), prices of existing order lines will not be updated.')
}
return {'warning': warning, 'value': value}
def get_salenote(self, cr, uid, ids, partner_id, context=None):
context_lang = context.copy()
if partner_id:
partner_lang = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).lang
context_lang.update({'lang': partner_lang})
return self.pool.get('res.users').browse(cr, uid, uid, context=context_lang).company_id.sale_note
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'payment_term': False, 'fiscal_position': False}}
part = self.pool.get('res.partner').browse(cr, uid, part, context=context)
addr = self.pool.get('res.partner').address_get(cr, uid, [part.id], ['delivery', 'invoice', 'contact'])
pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False
payment_term = part.property_payment_term and part.property_payment_term.id or False
fiscal_position = part.property_account_position and part.property_account_position.id or False
dedicated_salesman = part.user_id and part.user_id.id or uid
val = {
'partner_invoice_id': addr['invoice'],
'partner_shipping_id': addr['delivery'],
'payment_term': payment_term,
'fiscal_position': fiscal_position,
'user_id': dedicated_salesman,
}
if pricelist:
val['pricelist_id'] = pricelist
sale_note = self.get_salenote(cr, uid, ids, part.id, context=context)
if sale_note: val.update({'note': sale_note})
return {'value': val}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('name', '/') == '/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'sale.order') or '/'
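        # When a partner is given but the addresses or the pricelist are missing
        # (typical for orders created through the API), replay onchange_partner_id
        # to fill in those defaults before creating the record.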
if vals.get('partner_id') and any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):
defaults = self.onchange_partner_id(cr, uid, [], vals['partner_id'], context)['value']
vals = dict(defaults, **vals)
context.update({'mail_create_nolog': True})
new_id = super(sale_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [new_id], body=_("Quotation created"), context=context)
return new_id
def button_dummy(self, cr, uid, ids, context=None):
return True
# FIXME: deprecated method, overriders should be using _prepare_invoice() instead.
# can be removed after 6.1.
def _inv_get(self, cr, uid, order, context=None):
return {}
def _prepare_invoice(self, cr, uid, order, lines, context=None):
"""Prepare the dict of values to create the new invoice for a
sales order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: sale.order record to invoice
           :param list(int) lines: list of invoice line IDs that must be
                                  attached to the invoice
           :return: dict of values to create() the invoice
"""
if context is None:
context = {}
journal_ids = self.pool.get('account.journal').search(cr, uid,
[('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Please define sales journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
invoice_vals = {
'name': order.client_order_ref or '',
'origin': order.name,
'type': 'out_invoice',
'reference': order.client_order_ref or order.name,
'account_id': order.partner_id.property_account_receivable.id,
'partner_id': order.partner_invoice_id.id,
'journal_id': journal_ids[0],
'invoice_line': [(6, 0, lines)],
'currency_id': order.pricelist_id.currency_id.id,
'comment': order.note,
'payment_term': order.payment_term and order.payment_term.id or False,
'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,
'date_invoice': context.get('date_invoice', False),
'company_id': order.company_id.id,
'user_id': order.user_id and order.user_id.id or False,
'section_id' : order.section_id.id
}
# Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1
invoice_vals.update(self._inv_get(cr, uid, order, context=context))
return invoice_vals
def _make_invoice(self, cr, uid, order, lines, context=None):
inv_obj = self.pool.get('account.invoice')
obj_invoice_line = self.pool.get('account.invoice.line')
if context is None:
context = {}
invoiced_sale_line_ids = self.pool.get('sale.order.line').search(cr, uid, [('order_id', '=', order.id), ('invoiced', '=', True)], context=context)
from_line_invoice_ids = []
for invoiced_sale_line_id in self.pool.get('sale.order.line').browse(cr, uid, invoiced_sale_line_ids, context=context):
for invoice_line_id in invoiced_sale_line_id.invoice_lines:
if invoice_line_id.invoice_id.id not in from_line_invoice_ids:
from_line_invoice_ids.append(invoice_line_id.invoice_id.id)
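        # Deduct advance/deposit invoices that were not generated from order lines:
        # their lines are copied onto the new invoice with a negated unit price.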
for preinv in order.invoice_ids:
if preinv.state not in ('cancel',) and preinv.id not in from_line_invoice_ids:
for preline in preinv.invoice_line:
inv_line_id = obj_invoice_line.copy(cr, uid, preline.id, {'invoice_id': False, 'price_unit': -preline.price_unit})
lines.append(inv_line_id)
inv = self._prepare_invoice(cr, uid, order, lines, context=context)
inv_id = inv_obj.create(cr, uid, inv, context=context)
data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv['payment_term'], time.strftime(DEFAULT_SERVER_DATE_FORMAT))
if data.get('value', False):
inv_obj.write(cr, uid, [inv_id], data['value'], context=context)
inv_obj.button_compute(cr, uid, [inv_id])
return inv_id
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the sales order and marks it as sent, so that the next step of the workflow can be seen more easily
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_quotation_sent(cr, uid, ids)
return self.pool['report'].get_action(cr, uid, ids, 'sale.report_saleorder', context=context)
def manual_invoice(self, cr, uid, ids, context=None):
""" create invoices for the given sales orders (ids), and open the form
view of one of the newly created invoices
"""
mod_obj = self.pool.get('ir.model.data')
# create invoices through the sales orders' workflow
inv_ids0 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
self.signal_manual_invoice(cr, uid, ids)
inv_ids1 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
# determine newly created invoices
new_inv_ids = list(inv_ids1 - inv_ids0)
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
res_id = res and res[1] or False,
return {
'name': _('Customer Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': new_inv_ids and new_inv_ids[0] or False,
}
def action_view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of the given sales order ids. They are shown either in a list view or, if there is only one invoice, in a form view.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of invoices to display
inv_ids = []
for so in self.browse(cr, uid, ids, context=context):
inv_ids += [invoice.id for invoice in so.invoice_ids]
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def test_no_product(self, cr, uid, order, context):
for line in order.order_line:
            if line.product_id and (line.product_id.type != 'service'):
return False
return True
def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice = False, context=None):
if states is None:
states = ['confirmed', 'done', 'exception']
res = False
invoices = {}
invoice_ids = []
invoice = self.pool.get('account.invoice')
obj_sale_order_line = self.pool.get('sale.order.line')
partner_currency = {}
if context is None:
context = {}
        # If a date was specified, use it as the invoice date; useful when invoices are generated this month
        # but must carry the last day of the previous month as invoice date
if date_invoice:
context['date_invoice'] = date_invoice
for o in self.browse(cr, uid, ids, context=context):
currency_id = o.pricelist_id.currency_id.id
            if (o.partner_id.id in partner_currency) and (partner_currency[o.partner_id.id] != currency_id):
raise osv.except_osv(
_('Error!'),
_('You cannot group sales having different currencies for the same partner.'))
partner_currency[o.partner_id.id] = currency_id
lines = []
for line in o.order_line:
if line.invoiced:
continue
elif (line.state in states):
lines.append(line.id)
created_lines = obj_sale_order_line.invoice_line_create(cr, uid, lines)
if created_lines:
invoices.setdefault(o.partner_invoice_id.id or o.partner_id.id, []).append((o, created_lines))
if not invoices:
for o in self.browse(cr, uid, ids, context=context):
for i in o.invoice_ids:
if i.state == 'draft':
return i.id
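        # Invoices are grouped by invoice address (or partner): with grouped=True a
        # single invoice merges the lines of all orders of that partner, otherwise
        # one invoice is created per sales order.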
for val in invoices.values():
if grouped:
res = self._make_invoice(cr, uid, val[0][0], reduce(lambda x, y: x + y, [l for o, l in val], []), context=context)
invoice_ref = ''
for o, l in val:
invoice_ref += o.name + '|'
self.write(cr, uid, [o.id], {'state': 'progress'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (o.id, res))
#remove last '|' in invoice_ref
if len(invoice_ref) >= 1:
invoice_ref = invoice_ref[:-1]
invoice.write(cr, uid, [res], {'origin': invoice_ref, 'name': invoice_ref})
else:
for order, il in val:
res = self._make_invoice(cr, uid, order, il, context=context)
invoice_ids.append(res)
self.write(cr, uid, [order.id], {'state': 'progress'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (order.id, res))
return res
def action_invoice_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'invoice_except'}, context=context)
return True
def action_invoice_end(self, cr, uid, ids, context=None):
for this in self.browse(cr, uid, ids, context=context):
for line in this.order_line:
if line.state == 'exception':
line.write({'state': 'confirmed'})
if this.state == 'invoice_except':
this.write({'state': 'progress'})
return True
def action_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
sale_order_line_obj = self.pool.get('sale.order.line')
account_invoice_obj = self.pool.get('account.invoice')
for sale in self.browse(cr, uid, ids, context=context):
for inv in sale.invoice_ids:
if inv.state not in ('draft', 'cancel'):
raise osv.except_osv(
_('Cannot cancel this sales order!'),
_('First cancel all invoices attached to this sales order.'))
for r in self.read(cr, uid, ids, ['invoice_ids']):
account_invoice_obj.signal_invoice_cancel(cr, uid, r['invoice_ids'])
sale_order_line_obj.write(cr, uid, [l.id for l in sale.order_line],
{'state': 'cancel'})
self.write(cr, uid, ids, {'state': 'cancel'})
return True
def action_button_confirm(self, cr, uid, ids, context=None):
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
self.signal_order_confirm(cr, uid, ids)
# redisplay the record as a sales order
view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
view_id = view_ref and view_ref[1] or False,
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': ids[0],
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def action_wait(self, cr, uid, ids, context=None):
context = context or {}
for o in self.browse(cr, uid, ids):
if not o.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a sales order which has no line.'))
noprod = self.test_no_product(cr, uid, o, context)
if (o.order_policy == 'manual') or noprod:
self.write(cr, uid, [o.id], {'state': 'manual', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
else:
self.write(cr, uid, [o.id], {'state': 'progress', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
self.pool.get('sale.order.line').button_confirm(cr, uid, [x.id for x in o.order_line])
return True
def action_quotation_send(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'sale.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def action_done(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
self.pool.get('sale.order.line').write(cr, uid, [line.id for line in order.order_line], {'state': 'done'}, context=context)
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
date_planned = self._get_date_planned(cr, uid, order, line, order.date_order, context=context)
return {
'name': line.name,
'origin': order.name,
'date_planned': date_planned,
'product_id': line.product_id.id,
'product_qty': line.product_uom_qty,
'product_uom': line.product_uom.id,
'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty,
'product_uos': (line.product_uos and line.product_uos.id) or line.product_uom.id,
'company_id': order.company_id.id,
'group_id': group_id,
'invoice_state': (order.order_policy == 'picking') and '2binvoiced' or 'none',
'sale_line_id': line.id
}
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
date_planned = datetime.strptime(start_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=line.delay or 0.0)
return date_planned
def _prepare_procurement_group(self, cr, uid, order, context=None):
return {'name': order.name, 'partner_id': order.partner_shipping_id.id}
def procurement_needed(self, cr, uid, ids, context=None):
        #when only the sale module is installed there is no need to create procurements; it is only
        #further installed modules (project_mrp, sale_stock) that will change this.
sale_line_obj = self.pool.get('sale.order.line')
res = []
for order in self.browse(cr, uid, ids, context=context):
res.append(sale_line_obj.need_procurement(cr, uid, [line.id for line in order.order_line], context=context))
return any(res)
def action_ignore_delivery_exception(self, cr, uid, ids, context=None):
for sale_order in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, ids, {'state': 'progress' if sale_order.invoice_exists else 'manual'}, context=context)
return True
def action_ship_create(self, cr, uid, ids, context=None):
"""Create the required procurements to supply sales order lines, also connecting
the procurements to appropriate stock moves in order to bring the goods to the
sales order's requested location.
:return: True
"""
procurement_obj = self.pool.get('procurement.order')
sale_line_obj = self.pool.get('sale.order.line')
for order in self.browse(cr, uid, ids, context=context):
proc_ids = []
vals = self._prepare_procurement_group(cr, uid, order, context=context)
if not order.procurement_group_id:
group_id = self.pool.get("procurement.group").create(cr, uid, vals, context=context)
order.write({'procurement_group_id': group_id}, context=context)
for line in order.order_line:
                #Try to fix procurements in exception (possible when, after a shipping exception, the user chooses to recreate them)
if line.procurement_ids:
#first check them to see if they are in exception or not (one of the related moves is cancelled)
procurement_obj.check(cr, uid, [x.id for x in line.procurement_ids if x.state not in ['cancel', 'done']])
line.refresh()
                    #run again the procurements that are in exception in order to trigger another move
proc_ids += [x.id for x in line.procurement_ids if x.state == 'exception']
elif sale_line_obj.need_procurement(cr, uid, [line.id], context=context):
if (line.state == 'done') or not line.product_id:
continue
vals = self._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
proc_id = procurement_obj.create(cr, uid, vals, context=context)
proc_ids.append(proc_id)
#Confirm procurement order such that rules will be applied on it
#note that the workflow normally ensure proc_ids isn't an empty list
procurement_obj.run(cr, uid, proc_ids, context=context)
            #if shipping was in exception and the user chooses to recreate the delivery order, write the new status of the SO
if order.state == 'shipping_except':
val = {'state': 'progress', 'shipped': False}
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
order.write(val)
return True
# if mode == 'finished':
# returns True if all lines are done, False otherwise
# if mode == 'canceled':
# returns True if there is at least one canceled line, False otherwise
def test_state(self, cr, uid, ids, mode, *args):
assert mode in ('finished', 'canceled'), _("invalid mode for test_state")
finished = True
canceled = False
write_done_ids = []
write_cancel_ids = []
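        # A line is considered done when all of its procurements are done or
        # cancelled (but not all cancelled); it is pushed to 'exception' when every
        # one of its procurements has been cancelled.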
for order in self.browse(cr, uid, ids, context={}):
#TODO: Need to rethink what happens when cancelling
for line in order.order_line:
states = [x.state for x in line.procurement_ids]
cancel = states and all([x == 'cancel' for x in states])
doneorcancel = all([x in ('done', 'cancel') for x in states])
if cancel:
canceled = True
if line.state != 'exception':
write_cancel_ids.append(line.id)
if not doneorcancel:
finished = False
if doneorcancel and not cancel:
write_done_ids.append(line.id)
if write_done_ids:
self.pool.get('sale.order.line').write(cr, uid, write_done_ids, {'state': 'done'})
if write_cancel_ids:
self.pool.get('sale.order.line').write(cr, uid, write_cancel_ids, {'state': 'exception'})
if mode == 'finished':
return finished
elif mode == 'canceled':
return canceled
def procurement_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.procurement_ids]
return res
def onchange_fiscal_position(self, cr, uid, ids, fiscal_position, order_lines, context=None):
'''Update taxes of order lines for each line where a product is defined
:param list ids: not used
:param int fiscal_position: sale order fiscal position
:param list order_lines: command list for one2many write method
'''
order_line = []
fiscal_obj = self.pool.get('account.fiscal.position')
product_obj = self.pool.get('product.product')
line_obj = self.pool.get('sale.order.line')
fpos = False
if fiscal_position:
fpos = fiscal_obj.browse(cr, uid, fiscal_position, context=context)
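        # Rewrite the one2many commands so that every line holding a product gets its
        # taxes re-mapped through the selected fiscal position (link commands are
        # turned into updates when the taxes must change).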
for line in order_lines:
# create (0, 0, { fields })
# update (1, ID, { fields })
if line[0] in [0, 1]:
prod = None
if line[2].get('product_id'):
prod = product_obj.browse(cr, uid, line[2]['product_id'], context=context)
elif line[1]:
prod = line_obj.browse(cr, uid, line[1], context=context).product_id
if prod and prod.taxes_id:
line[2]['tax_id'] = [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]
order_line.append(line)
# link (4, ID)
# link all (6, 0, IDS)
elif line[0] in [4, 6]:
line_ids = line[0] == 4 and [line[1]] or line[2]
for line_id in line_ids:
prod = line_obj.browse(cr, uid, line_id, context=context).product_id
if prod and prod.taxes_id:
order_line.append([1, line_id, {'tax_id': [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]}])
else:
order_line.append([4, line_id])
else:
order_line.append(line)
return {'value': {'order_line': order_line}}
# TODO add a field price_unit_uos
# - update it on change product and unit price
# - use it in report if there is a uos
class sale_order_line(osv.osv):
def need_procurement(self, cr, uid, ids, context=None):
        #when only the sale module is installed there is no need to create procurements; it is only
        #further installed modules (project_mrp, sale_stock) that will change this.
return False
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
res = {}
if context is None:
context = {}
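        # price_subtotal per line: unit price net of the line discount, run through
        # compute_all for the line taxes (untaxed total) and rounded in the currency
        # of the order's pricelist.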
for line in self.browse(cr, uid, ids, context=context):
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = tax_obj.compute_all(cr, uid, line.tax_id, price, line.product_uom_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, *args):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
def _fnct_line_invoiced(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for this in self.browse(cr, uid, ids, context=context):
res[this.id] = this.invoice_lines and \
all(iline.invoice_id.state != 'cancel' for iline in this.invoice_lines)
return res
def _order_lines_from_invoice(self, cr, uid, ids, context=None):
        # direct access to the m2m table is the least convoluted way to achieve this (and is ok ACL-wise)
cr.execute("""SELECT DISTINCT sol.id FROM sale_order_invoice_rel rel JOIN
sale_order_line sol ON (sol.order_id = rel.order_id)
WHERE rel.invoice_id = ANY(%s)""", (list(ids),))
return [i[0] for i in cr.fetchall()]
_name = 'sale.order.line'
_description = 'Sales Order Line'
_columns = {
'order_id': fields.many2one('sale.order', 'Order Reference', required=True, ondelete='cascade', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'name': fields.text('Description', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of sales order lines."),
'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], change_default=True, readonly=True, states={'draft': [('readonly', False)]}, ondelete='restrict'),
'invoice_lines': fields.many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.function(_fnct_line_invoiced, string='Invoiced', type='boolean',
store={
'account.invoice': (_order_lines_from_invoice, ['state'], 10),
'sale.order.line': (lambda self,cr,uid,ids,ctx=None: ids, ['invoice_lines'], 10)
}),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price'), readonly=True, states={'draft': [('readonly', False)]}),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'sale_order_tax', 'order_line_id', 'tax_id', 'Taxes', readonly=True, states={'draft': [('readonly', False)]}),
'address_allotment_id': fields.many2one('res.partner', 'Allotment Partner',help="A partner to whom the particular product needs to be allotted."),
'product_uom_qty': fields.float('Quantity', digits_compute= dp.get_precision('Product UoS'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Unit of Measure ', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Quantity (UoS)' ,digits_compute= dp.get_precision('Product UoS'), readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS'),
'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount'), readonly=True, states={'draft': [('readonly', False)]}),
'th_weight': fields.float('Weight', readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([('cancel', 'Cancelled'),('draft', 'Draft'),('confirmed', 'Confirmed'),('exception', 'Exception'),('done', 'Done')], 'Status', required=True, readonly=True,
help='* The \'Draft\' status is set when the related sales order in draft status. \
\n* The \'Confirmed\' status is set when the related sales order is confirmed. \
\n* The \'Exception\' status is set when the related sales order is set as exception. \
\n* The \'Done\' status is set when the sales order line has been picked. \
\n* The \'Cancelled\' status is set when a user cancel the sales order related.'),
'order_partner_id': fields.related('order_id', 'partner_id', type='many2one', relation='res.partner', store=True, string='Customer'),
'salesman_id':fields.related('order_id', 'user_id', type='many2one', relation='res.users', store=True, string='Salesperson'),
'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'delay': fields.float('Delivery Lead Time', required=True, help="Number of days between the order confirmation and the shipping of the products to the customer", readonly=True, states={'draft': [('readonly', False)]}),
'procurement_ids': fields.one2many('procurement.order', 'sale_line_id', 'Procurements'),
}
_order = 'order_id desc, sequence, id'
_defaults = {
'product_uom' : _get_uom_id,
'discount': 0.0,
'product_uom_qty': 1,
'product_uos_qty': 1,
'sequence': 10,
'state': 'draft',
'price_unit': 0.0,
'delay': 0.0,
}
def _get_line_qty(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos_qty or 0.0
return line.product_uom_qty
def _get_line_uom(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos.id
return line.product_uom.id
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Prepare the dict of values to create the new invoice line for a
sales order line. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record line: sale.order.line record to invoice
:param int account_id: optional ID of a G/L account to force
(this is used for returning products including service)
:return: dict of values to create() the invoice line
"""
res = {}
if not line.invoiced:
if not account_id:
if line.product_id:
account_id = line.product_id.property_account_income.id
if not account_id:
account_id = line.product_id.categ_id.property_account_income_categ.id
if not account_id:
raise osv.except_osv(_('Error!'),
_('Please define income account for this product: "%s" (id:%d).') % \
(line.product_id.name, line.product_id.id,))
else:
prop = self.pool.get('ir.property').get(cr, uid,
'property_account_income_categ', 'product.category',
context=context)
account_id = prop and prop.id or False
uosqty = self._get_line_qty(cr, uid, line, context=context)
uos_id = self._get_line_uom(cr, uid, line, context=context)
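            # Re-express the unit price in the invoiced unit (UoS when set): the
            # line total is preserved while the invoiced quantity is given in UoS.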
pu = 0.0
if uosqty:
pu = round(line.price_unit * line.product_uom_qty / uosqty,
self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Price'))
fpos = line.order_id.fiscal_position or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, account_id)
if not account_id:
raise osv.except_osv(_('Error!'),
_('There is no Fiscal Position defined or Income category account defined for default properties of Product categories.'))
res = {
'name': line.name,
'sequence': line.sequence,
'origin': line.order_id.name,
'account_id': account_id,
'price_unit': pu,
'quantity': uosqty,
'discount': line.discount,
'uos_id': uos_id,
'product_id': line.product_id.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in line.tax_id])],
'account_analytic_id': line.order_id.project_id and line.order_id.project_id.id or False,
}
return res
def invoice_line_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
create_ids = []
sales = set()
for line in self.browse(cr, uid, ids, context=context):
vals = self._prepare_order_line_invoice_line(cr, uid, line, False, context)
if vals:
inv_id = self.pool.get('account.invoice.line').create(cr, uid, vals, context=context)
self.write(cr, uid, [line.id], {'invoice_lines': [(4, inv_id)]}, context=context)
sales.add(line.order_id.id)
create_ids.append(inv_id)
# Trigger workflow events
for sale_id in sales:
workflow.trg_write(uid, 'sale.order', sale_id, cr)
return create_ids
def button_cancel(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.invoiced:
raise osv.except_osv(_('Invalid Action!'), _('You cannot cancel a sales order line that has already been invoiced.'))
return self.write(cr, uid, ids, {'state': 'cancel'})
def button_confirm(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'confirmed'})
def button_done(self, cr, uid, ids, context=None):
res = self.write(cr, uid, ids, {'state': 'done'})
for line in self.browse(cr, uid, ids, context=context):
workflow.trg_write(uid, 'sale.order', line.order_id.id, cr)
return res
def uos_change(self, cr, uid, ids, product_uos, product_uos_qty=0, product_id=None):
product_obj = self.pool.get('product.product')
if not product_id:
return {'value': {'product_uom': product_uos,
'product_uom_qty': product_uos_qty}, 'domain': {}}
product = product_obj.browse(cr, uid, product_id)
value = {
'product_uom': product.uom_id.id,
}
# FIXME must depend on uos/uom of the product and not only of the coeff.
try:
value.update({
'product_uom_qty': product_uos_qty / product.uos_coeff,
'th_weight': product_uos_qty / product.uos_coeff * product.weight
})
except ZeroDivisionError:
pass
return {'value': value}
def create(self, cr, uid, values, context=None):
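        # When essential line values are missing (typical for lines created through
        # the API), replay product_id_change so that name, price, taxes and units
        # get their default values before creation.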
if values.get('order_id') and values.get('product_id') and any(f not in values for f in ['name', 'price_unit', 'type', 'product_uom_qty', 'product_uom']):
order = self.pool['sale.order'].read(cr, uid, values['order_id'], ['pricelist_id', 'partner_id', 'date_order', 'fiscal_position'], context=context)
defaults = self.product_id_change(cr, uid, [], order['pricelist_id'][0], values['product_id'],
qty=float(values.get('product_uom_qty', False)),
uom=values.get('product_uom', False),
qty_uos=float(values.get('product_uos_qty', False)),
uos=values.get('product_uos', False),
name=values.get('name', False),
partner_id=order['partner_id'][0],
date_order=order['date_order'],
fiscal_position=order['fiscal_position'][0] if order['fiscal_position'] else False,
flag=False, # Force name update
context=context
)['value']
values = dict(defaults, **values)
return super(sale_order_line, self).create(cr, uid, values, context=context)
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state': 'draft', 'invoice_lines': [], 'procurement_ids': []})
return super(sale_order_line, self).copy_data(cr, uid, id, default, context=context)
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
context = context or {}
lang = lang or context.get('lang', False)
if not partner_id:
raise osv.except_osv(_('No Customer Defined!'), _('Before choosing a product,\n select a customer in the sales form.'))
warning = False
product_uom_obj = self.pool.get('product.uom')
partner_obj = self.pool.get('res.partner')
product_obj = self.pool.get('product.product')
context = {'lang': lang, 'partner_id': partner_id}
partner = partner_obj.browse(cr, uid, partner_id)
lang = partner.lang
context_partner = {'lang': lang, 'partner_id': partner_id}
if not product:
return {'value': {'th_weight': 0,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
if not date_order:
date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
result = {}
warning_msgs = ''
product_obj = product_obj.browse(cr, uid, product, context=context_partner)
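        # Discard the received UoM/UoS when its category differs from the one of the
        # product, so that no cross-category unit conversion is attempted below.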
uom2 = False
if uom:
uom2 = product_uom_obj.browse(cr, uid, uom)
if product_obj.uom_id.category_id.id != uom2.category_id.id:
uom = False
if uos:
if product_obj.uos_id:
uos2 = product_uom_obj.browse(cr, uid, uos)
if product_obj.uos_id.category_id.id != uos2.category_id.id:
uos = False
else:
uos = False
fpos = False
if not fiscal_position:
fpos = partner.property_account_position or False
else:
fpos = self.pool.get('account.fiscal.position').browse(cr, uid, fiscal_position)
        if update_tax: #The quantity only has changed
result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, product_obj.taxes_id)
if not flag:
result['name'] = self.pool.get('product.product').name_get(cr, uid, [product_obj.id], context=context_partner)[0][1]
if product_obj.description_sale:
result['name'] += '\n'+product_obj.description_sale
domain = {}
if (not uom) and (not uos):
result['product_uom'] = product_obj.uom_id.id
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
uos_category_id = product_obj.uos_id.category_id.id
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
uos_category_id = False
result['th_weight'] = qty * product_obj.weight
domain = {'product_uom':
[('category_id', '=', product_obj.uom_id.category_id.id)],
'product_uos':
[('category_id', '=', uos_category_id)]}
elif uos and not uom: # only happens if uom is False
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id
result['product_uom_qty'] = qty_uos / product_obj.uos_coeff
result['th_weight'] = result['product_uom_qty'] * product_obj.weight
elif uom: # whether uos is set or not
default_uom = product_obj.uom_id and product_obj.uom_id.id
q = product_uom_obj._compute_qty(cr, uid, uom, qty, default_uom)
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
result['th_weight'] = q * product_obj.weight # Round the quantity up
if not uom2:
uom2 = product_obj.uom_id
# get unit price
if not pricelist:
warn_msg = _('You have to select a pricelist or a customer in the sales form !\n'
'Please set one before choosing a product.')
warning_msgs += _("No Pricelist ! : ") + warn_msg +"\n\n"
else:
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product, qty or 1.0, partner_id, {
'uom': uom or result.get('product_uom'),
'date': date_order,
})[pricelist]
if price is False:
warn_msg = _("Cannot find a pricelist line matching this product and quantity.\n"
"You have to change either the product, the quantity or the pricelist.")
warning_msgs += _("No valid pricelist line found ! :") + warn_msg +"\n\n"
else:
result.update({'price_unit': price})
if warning_msgs:
warning = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
return {'value': result, 'domain': domain, 'warning': warning}
def product_uom_change(self, cursor, user, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, context=None):
context = context or {}
lang = lang or ('lang' in context and context['lang'])
if not uom:
return {'value': {'price_unit': 0.0, 'product_uom' : uom or False}}
return self.product_id_change(cursor, user, ids, pricelist, product,
qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name,
partner_id=partner_id, lang=lang, update_tax=update_tax,
date_order=date_order, context=context)
    def unlink(self, cr, uid, ids, context=None):
        """Allow deleting sale order lines only in the draft and cancel states."""
        if context is None:
            context = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a sales order line which is in state \'%s\'.') %(rec.state,))
return super(sale_order_line, self).unlink(cr, uid, ids, context=context)
class res_company(osv.Model):
_inherit = "res.company"
_columns = {
'sale_note': fields.text('Default Terms and Conditions', translate=True, help="Default terms and conditions for quotations."),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'sale.order' and context.get('default_res_id') and context.get('mark_so_as_sent'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('sale.order').signal_quotation_sent(cr, uid, [context['default_res_id']])
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
_inherit = 'account.invoice'
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_defaults = {
'section_id': lambda self, cr, uid, c=None: self._get_default_section_id(cr, uid, context=c)
}
def confirm_paid(self, cr, uid, ids, context=None):
sale_order_obj = self.pool.get('sale.order')
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
so_ids = sale_order_obj.search(cr, uid, [('invoice_ids', 'in', ids)], context=context)
for so_id in so_ids:
sale_order_obj.message_post(cr, uid, so_id, body=_("Invoice paid"), context=context)
return res
def unlink(self, cr, uid, ids, context=None):
""" Overwrite unlink method of account invoice to send a trigger to the sale workflow upon invoice deletion """
invoice_ids = self.search(cr, uid, [('id', 'in', ids), ('state', 'in', ['draft', 'cancel'])], context=context)
#if we can't cancel all invoices, do nothing
if len(invoice_ids) == len(ids):
#Cancel invoice(s) first before deleting them so that if any sale order is associated with them
#it will trigger the workflow to put the sale order in an 'invoice exception' state
for id in ids:
workflow.trg_validate(uid, 'account.invoice', id, 'invoice_cancel', cr)
return super(account_invoice, self).unlink(cr, uid, ids, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'sale_line_id': fields.many2one('sale.order.line', string='Sale Order Line'),
}
class product_product(osv.Model):
_inherit = 'product.product'
def _sales_count(self, cr, uid, ids, field_name, arg, context=None):
SaleOrderLine = self.pool['sale.order.line']
return {
product_id: SaleOrderLine.search_count(cr,uid, [('product_id', '=', product_id)], context=context)
for product_id in ids
}
_columns = {
'sales_count': fields.function(_sales_count, string='# Sales', type='integer'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,351,337,016,951,447,000 | 51.217054 | 306 | 0.576292 | false |
mmalorni/server-tools | __unported__/fetchmail_attach_from_folder/match_algorithm/email_domain.py | 6 | 1985 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from email_exact import email_exact
class email_domain(email_exact):
'''Search objects by domain name of email address.
    Beware of match_first here: with widely shared domains (e.g. gmail) it is most likely to match the wrong partner.'''
name = 'Domain of email address'
def search_matches(self, cr, uid, conf, mail_message, mail_message_org):
ids = super(email_domain, self).search_matches(
cr, uid, conf, mail_message, mail_message_org)
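        # Only fall back to a domain-wide search when the exact-address match of the
        # parent algorithm found nothing.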
if not ids:
domains = []
for addr in self._get_mailaddresses(conf, mail_message):
domains.append(addr.split('@')[-1])
ids = conf.pool.get(conf.model_id.model).search(
cr, uid,
self._get_mailaddress_search_domain(
conf, mail_message,
operator='like',
values=['%@'+domain for domain in set(domains)]),
order=conf.model_order)
return ids
| agpl-3.0 | 4,544,912,026,635,328,500 | 44.113636 | 78 | 0.578841 | false |
loco-odoo/localizacion_co | openerp/addons/crm_partner_assign/report/crm_partner_report.py | 264 | 3374 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
class crm_partner_report_assign(osv.osv):
""" CRM Lead Report """
_name = "crm.partner.report.assign"
_auto = False
_description = "CRM Partner Report"
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner', required=False, readonly=True),
'grade_id':fields.many2one('res.partner.grade', 'Grade', readonly=True),
'activation' : fields.many2one('res.partner.activation', 'Activation', select=1),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'date_review' : fields.date('Latest Partner Review'),
'date_partnership' : fields.date('Partnership Date'),
'country_id':fields.many2one('res.country', 'Country', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Sales Team', readonly=True),
'opp': fields.integer('# of Opportunity', readonly=True), # TDE FIXME master: rename into nbr_opportunities
'turnover': fields.float('Turnover', readonly=True),
'period_id': fields.many2one('account.period', 'Invoice Period', readonly=True),
}
def init(self, cr):
"""
            CRM Partner Report
            @param cr: the database cursor
"""
tools.drop_view_if_exists(cr, 'crm_partner_report_assign')
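        # One row per partner and invoice report line; partners without any posted
        # invoice still show up thanks to the COALESCE, which gives them a synthetic
        # id offset by -1000000000.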
cr.execute("""
CREATE OR REPLACE VIEW crm_partner_report_assign AS (
SELECT
coalesce(i.id, p.id - 1000000000) as id,
p.id as partner_id,
(SELECT country_id FROM res_partner a WHERE a.parent_id=p.id AND country_id is not null limit 1) as country_id,
p.grade_id,
p.activation,
p.date_review,
p.date_partnership,
p.user_id,
p.section_id,
(SELECT count(id) FROM crm_lead WHERE partner_assigned_id=p.id) AS opp,
i.price_total as turnover,
i.period_id
FROM
res_partner p
left join account_invoice_report i
on (i.partner_id=p.id and i.type in ('out_invoice','out_refund') and i.state in ('open','paid'))
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,550,536,960,857,555,000 | 46.521127 | 131 | 0.577356 | false |
fuselock/odoo | addons/email_template/wizard/email_template_preview.py | 377 | 3851 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class email_template_preview(osv.osv_memory):
_inherit = "email.template"
_name = "email_template.preview"
_description = "Email Template Preview"
def _get_records(self, cr, uid, context=None):
"""
        Return the records of the model referenced by the email template given in the context.
"""
if context is None:
context = {}
template_id = context.get('template_id', False)
if not template_id:
return []
email_template = self.pool.get('email.template')
template = email_template.browse(cr, uid, int(template_id), context=context)
template_object = template.model_id
model = self.pool[template_object.model]
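        # Only the first ten records of the template's model are proposed; the
        # record coming from the context is forced to the top of the list.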
record_ids = model.search(cr, uid, [], 0, 10, 'id', context=context)
default_id = context.get('default_res_id')
if default_id and default_id not in record_ids:
record_ids.insert(0, default_id)
return model.name_get(cr, uid, record_ids, context)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
result = super(email_template_preview, self).default_get(cr, uid, fields, context=context)
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
if 'res_id' in fields and not result.get('res_id'):
records = self._get_records(cr, uid, context=context)
result['res_id'] = records and records[0][0] or False # select first record as a Default
if template_id and 'model_id' in fields and not result.get('model_id'):
result['model_id'] = email_template.read(cr, uid, int(template_id), ['model_id'], context).get('model_id', False)
return result
_columns = {
'res_id': fields.selection(_get_records, 'Sample Document'),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
}
def on_change_res_id(self, cr, uid, ids, res_id, context=None):
if context is None:
context = {'value': {}}
if not res_id or not context.get('template_id'):
return {'value': {}}
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
template = email_template.browse(cr, uid, template_id, context=context)
# generate and get template values
mail_values = email_template.generate_email(cr, uid, template_id, res_id, context=context)
vals = dict((field, mail_values.get(field, False)) for field in ('email_from', 'email_to', 'email_cc', 'reply_to', 'subject', 'body_html', 'partner_to', 'partner_ids', 'attachment_ids'))
vals['name'] = template.name
return {'value': vals}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,725,880,260,493,870,000 | 42.761364 | 194 | 0.613347 | false |
fronzbot/blinkpy | blinkpy/blinkpy.py | 1 | 13098 | # -*- coding: utf-8 -*-
"""
blinkpy is an unofficial api for the Blink security camera system.
repo url: https://github.com/fronzbot/blinkpy
Original protocol hacking by MattTW :
https://github.com/MattTW/BlinkMonitorProtocol
Published under the MIT license - See LICENSE file for more details.
"Blink Wire-Free HS Home Monitoring & Alert Systems" is a trademark
owned by Immedia Inc., see www.blinkforhome.com for more information.
blinkpy is in no way affiliated with Blink, nor Immedia Inc.
"""
import os.path
import time
import logging
from shutil import copyfileobj
from requests.structures import CaseInsensitiveDict
from dateutil.parser import parse
from slugify import slugify
from blinkpy import api
from blinkpy.sync_module import BlinkSyncModule, BlinkOwl
from blinkpy.helpers import util
from blinkpy.helpers.constants import (
DEFAULT_MOTION_INTERVAL,
DEFAULT_REFRESH,
MIN_THROTTLE_TIME,
TIMEOUT_MEDIA,
)
from blinkpy.helpers.constants import __version__
from blinkpy.auth import Auth, TokenRefreshFailed, LoginError
_LOGGER = logging.getLogger(__name__)
class Blink:
"""Class to initialize communication."""
def __init__(
self,
refresh_rate=DEFAULT_REFRESH,
motion_interval=DEFAULT_MOTION_INTERVAL,
no_owls=False,
):
"""
Initialize Blink system.
:param refresh_rate: Refresh rate of blink information.
Defaults to 15 (seconds)
:param motion_interval: How far back to register motion in minutes.
Defaults to last refresh time.
Useful for preventing motion_detected property
from de-asserting too quickly.
        :param no_owls: Disable searching for owl entries (Blink Mini cameras are the only known owl devices). Prevents an unnecessary API call if you don't have these in your network.
"""
self.auth = Auth()
self.account_id = None
self.client_id = None
self.network_ids = []
self.urls = None
self.sync = CaseInsensitiveDict({})
self.last_refresh = None
self.refresh_rate = refresh_rate
self.networks = []
self.cameras = CaseInsensitiveDict({})
self.video_list = CaseInsensitiveDict({})
self.motion_interval = motion_interval
self.version = __version__
self.available = False
self.key_required = False
self.homescreen = {}
self.no_owls = no_owls
@util.Throttle(seconds=MIN_THROTTLE_TIME)
def refresh(self, force=False, force_cache=False):
"""
Perform a system refresh.
:param force: Used to override throttle, resets refresh
:param force_cache: Used to force update without overriding throttle
"""
if self.check_if_ok_to_update() or force or force_cache:
if not self.available:
self.setup_post_verify()
self.get_homescreen()
for sync_name, sync_module in self.sync.items():
_LOGGER.debug("Attempting refresh of sync %s", sync_name)
sync_module.refresh(force_cache=(force or force_cache))
if not force_cache:
# Prevents rapid clearing of motion detect property
self.last_refresh = int(time.time())
return True
return False
def start(self):
"""Perform full system setup."""
try:
self.auth.startup()
self.setup_login_ids()
self.setup_urls()
self.get_homescreen()
except (LoginError, TokenRefreshFailed, BlinkSetupError):
_LOGGER.error("Cannot setup Blink platform.")
self.available = False
return False
self.key_required = self.auth.check_key_required()
if self.key_required:
if self.auth.no_prompt:
return True
self.setup_prompt_2fa()
return self.setup_post_verify()
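    # Typical interactive flow (illustrative only; credential storage not shown):
    #   blink = Blink()
    #   blink.start()    # may prompt for the 2FA code sent to the account email
    #   blink.refresh()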
def setup_prompt_2fa(self):
"""Prompt for 2FA."""
email = self.auth.data["username"]
pin = input(f"Enter code sent to {email}: ")
result = self.auth.send_auth_key(self, pin)
self.key_required = not result
def setup_post_verify(self):
"""Initialize blink system after verification."""
try:
self.setup_networks()
networks = self.setup_network_ids()
cameras = self.setup_camera_list()
except BlinkSetupError:
self.available = False
return False
for name, network_id in networks.items():
sync_cameras = cameras.get(network_id, {})
self.setup_sync_module(name, network_id, sync_cameras)
self.cameras = self.merge_cameras()
self.available = True
self.key_required = False
return True
def setup_sync_module(self, name, network_id, cameras):
"""Initialize a sync module."""
self.sync[name] = BlinkSyncModule(self, name, network_id, cameras)
self.sync[name].start()
def get_homescreen(self):
"""Get homecreen information."""
if self.no_owls:
_LOGGER.debug("Skipping owl extraction.")
self.homescreen = {}
return
self.homescreen = api.request_homescreen(self)
def setup_owls(self):
"""Check for mini cameras."""
network_list = []
camera_list = []
try:
for owl in self.homescreen["owls"]:
name = owl["name"]
network_id = str(owl["network_id"])
if network_id in self.network_ids:
camera_list.append(
{network_id: {"name": name, "id": network_id, "type": "mini"}}
)
continue
if owl["onboarded"]:
network_list.append(str(network_id))
self.sync[name] = BlinkOwl(self, name, network_id, owl)
self.sync[name].start()
except KeyError:
# No sync-less devices found
pass
self.network_ids.extend(network_list)
return camera_list
def setup_camera_list(self):
"""Create camera list for onboarded networks."""
all_cameras = {}
response = api.request_camera_usage(self)
try:
for network in response["networks"]:
camera_network = str(network["network_id"])
if camera_network not in all_cameras:
all_cameras[camera_network] = []
for camera in network["cameras"]:
all_cameras[camera_network].append(
{"name": camera["name"], "id": camera["id"]}
)
mini_cameras = self.setup_owls()
for camera in mini_cameras:
for network, camera_info in camera.items():
all_cameras[network].append(camera_info)
return all_cameras
except (KeyError, TypeError):
_LOGGER.error("Unable to retrieve cameras from response %s", response)
raise BlinkSetupError
def setup_login_ids(self):
"""Retrieve login id numbers from login response."""
self.client_id = self.auth.client_id
self.account_id = self.auth.account_id
def setup_urls(self):
"""Create urls for api."""
try:
self.urls = util.BlinkURLHandler(self.auth.region_id)
except TypeError:
_LOGGER.error(
"Unable to extract region is from response %s", self.auth.login_response
)
raise BlinkSetupError
def setup_networks(self):
"""Get network information."""
response = api.request_networks(self)
try:
self.networks = response["summary"]
except (KeyError, TypeError):
raise BlinkSetupError
def setup_network_ids(self):
"""Create the network ids for onboarded networks."""
all_networks = []
network_dict = {}
try:
for network, status in self.networks.items():
if status["onboarded"]:
all_networks.append(f"{network}")
network_dict[status["name"]] = network
except AttributeError:
_LOGGER.error(
"Unable to retrieve network information from %s", self.networks
)
raise BlinkSetupError
self.network_ids = all_networks
return network_dict
def check_if_ok_to_update(self):
"""Check if it is ok to perform an http request."""
current_time = int(time.time())
last_refresh = self.last_refresh
if last_refresh is None:
last_refresh = 0
if current_time >= (last_refresh + self.refresh_rate):
return True
return False
def merge_cameras(self):
"""Merge all sync camera dicts into one."""
combined = CaseInsensitiveDict({})
for sync in self.sync:
combined = util.merge_dicts(combined, self.sync[sync].cameras)
return combined
def save(self, file_name):
"""Save login data to file."""
util.json_save(self.auth.login_attributes, file_name)
def download_videos(
self, path, since=None, camera="all", stop=10, delay=1, debug=False
):
"""
Download all videos from server since specified time.
:param path: Path to write files. /path/<cameraname>_<recorddate>.mp4
:param since: Date and time to get videos from.
Ex: "2018/07/28 12:33:00" to retrieve videos since
July 28th 2018 at 12:33:00
:param camera: Camera name to retrieve. Defaults to "all".
Use a list for multiple cameras.
:param stop: Page to stop on (~25 items per page. Default page 10).
:param delay: Number of seconds to wait in between subsequent video downloads.
:param debug: Set to TRUE to prevent downloading of items.
Instead of downloading, entries will be printed to log.
"""
if since is None:
since_epochs = self.last_refresh
else:
parsed_datetime = parse(since, fuzzy=True)
since_epochs = parsed_datetime.timestamp()
formatted_date = util.get_time(time_to_convert=since_epochs)
_LOGGER.info("Retrieving videos since %s", formatted_date)
if not isinstance(camera, list):
camera = [camera]
for page in range(1, stop):
response = api.request_videos(self, time=since_epochs, page=page)
_LOGGER.debug("Processing page %s", page)
try:
result = response["media"]
if not result:
raise KeyError
except (KeyError, TypeError):
_LOGGER.info("No videos found on page %s. Exiting.", page)
break
self._parse_downloaded_items(result, camera, path, delay, debug)
def _parse_downloaded_items(self, result, camera, path, delay, debug):
"""Parse downloaded videos."""
for item in result:
try:
created_at = item["created_at"]
camera_name = item["device_name"]
is_deleted = item["deleted"]
address = item["media"]
except KeyError:
_LOGGER.info("Missing clip information, skipping...")
continue
if camera_name not in camera and "all" not in camera:
_LOGGER.debug("Skipping videos for %s.", camera_name)
continue
if is_deleted:
_LOGGER.debug("%s: %s is marked as deleted.", camera_name, address)
continue
clip_address = f"{self.urls.base_url}{address}"
filename = f"{camera_name}-{created_at}"
filename = f"{slugify(filename)}.mp4"
filename = os.path.join(path, filename)
if not debug:
if os.path.isfile(filename):
_LOGGER.info("%s already exists, skipping...", filename)
continue
response = api.http_get(
self,
url=clip_address,
stream=True,
json=False,
timeout=TIMEOUT_MEDIA,
)
with open(filename, "wb") as vidfile:
copyfileobj(response.raw, vidfile)
_LOGGER.info("Downloaded video to %s", filename)
else:
                print(
                    (
                        f"Camera: {camera_name}, Timestamp: {created_at}, "
                        f"Address: {address}, Filename: {filename}"
                    )
                )
if delay > 0:
time.sleep(delay)
class BlinkSetupError(Exception):
"""Class to handle setup errors."""
| mit | 1,351,181,949,835,077,000 | 35.18232 | 172 | 0.563216 | false |
butchman0922/gourmet | gourmet/plugins/import_export/plaintext_plugin/plaintext_importer_plugin.py | 6 | 2087 | from gourmet.plugin import ImporterPlugin
from gourmet.importers.importer import Tester
from gourmet.threadManager import get_thread_manager
from gourmet.importers.interactive_importer import InteractiveImporter
from gourmet import check_encodings
import os.path
import fnmatch
from gettext import gettext as _
MAX_PLAINTEXT_LENGTH = 100000
class PlainTextImporter (InteractiveImporter):
name = 'Plain Text Importer'
def __init__ (self, filename):
self.filename = filename
InteractiveImporter.__init__(self)
def do_run (self):
if os.path.getsize(self.filename) > MAX_PLAINTEXT_LENGTH*16:
            import gtk
            import gourmet.gtk_extras.dialog_extras as de
de.show_message(title=_('Big File'),
label=_('File %s is too big to import'%self.filename),
sublabel=_('Your file exceeds the maximum length of %s characters. You probably didn\'t mean to import it anyway. If you really do want to import this file, use a text editor to split it into smaller files and try importing again.')%MAX_PLAINTEXT_LENGTH,
message_type=gtk.MESSAGE_ERROR)
return
ifi = file(self.filename,'r')
data = '\n'.join(check_encodings.get_file(ifi))
ifi.close()
self.set_text(data)
return InteractiveImporter.do_run(self)
class PlainTextImporterPlugin (ImporterPlugin):
name = _('Plain Text file')
patterns = ['*.txt','[^.]*','*']
mimetypes = ['text/plain']
antipatterns = ['*.html','*.htm','*.xml','*.doc','*.rtf']
def test_file (self, filename):
'''Given a filename, test whether the file is of this type.'''
if filename.endswith('.txt'):
return 1
elif not True in [fnmatch.fnmatch(filename,p) for p in self.antipatterns]:
return -1 # we are a fallback option
def get_importer (self, filename):
return PlainTextImporter(filename=filename)
| gpl-2.0 | 203,876,569,544,358,080 | 36.267857 | 282 | 0.621466 | false |
GbalsaC/bitnamiP | lms/djangoapps/notes/models.py | 100 | 3156 | from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.utils.html import strip_tags
import json
from xmodule_django.models import CourseKeyField
class Note(models.Model):
user = models.ForeignKey(User, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
uri = models.CharField(max_length=255, db_index=True)
text = models.TextField(default="")
quote = models.TextField(default="")
range_start = models.CharField(max_length=2048) # xpath string
range_start_offset = models.IntegerField()
range_end = models.CharField(max_length=2048) # xpath string
range_end_offset = models.IntegerField()
tags = models.TextField(default="") # comma-separated string
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
def clean(self, json_body):
"""
Cleans the note object or raises a ValidationError.
"""
if json_body is None:
raise ValidationError('Note must have a body.')
body = json.loads(json_body)
if not isinstance(body, dict):
raise ValidationError('Note body must be a dictionary.')
# NOTE: all three of these fields should be considered user input
# and may be output back to the user, so we need to sanitize them.
# These fields should only contain _plain text_.
self.uri = strip_tags(body.get('uri', ''))
self.text = strip_tags(body.get('text', ''))
self.quote = strip_tags(body.get('quote', ''))
ranges = body.get('ranges')
if ranges is None or len(ranges) != 1:
raise ValidationError('Note must contain exactly one range.')
self.range_start = ranges[0]['start']
self.range_start_offset = ranges[0]['startOffset']
self.range_end = ranges[0]['end']
self.range_end_offset = ranges[0]['endOffset']
self.tags = ""
tags = [strip_tags(tag) for tag in body.get('tags', [])]
if len(tags) > 0:
self.tags = ",".join(tags)
def get_absolute_url(self):
"""
Returns the absolute url for the note object.
"""
# pylint: disable=no-member
kwargs = {'course_id': self.course_id.to_deprecated_string(), 'note_id': str(self.pk)}
return reverse('notes_api_note', kwargs=kwargs)
def as_dict(self):
"""
Returns the note object as a dictionary.
"""
return {
'id': self.pk,
'user_id': self.user.pk,
'uri': self.uri,
'text': self.text,
'quote': self.quote,
'ranges': [{
'start': self.range_start,
'startOffset': self.range_start_offset,
'end': self.range_end,
'endOffset': self.range_end_offset
}],
'tags': self.tags.split(","),
'created': str(self.created),
'updated': str(self.updated)
}
| agpl-3.0 | -603,661,891,980,483,300 | 36.571429 | 94 | 0.60076 | false |
IllusionRom-deprecated/android_platform_external_chromium_org | tools/telemetry/telemetry/page/actions/click_element_unittest.py | 23 | 3183 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
from telemetry.page.actions import click_element
from telemetry.page.actions import wait
from telemetry.unittest import tab_test_case
class ClickElementActionTest(tab_test_case.TabTestCase):
def testClickWithSelectorWaitForNavigation(self):
self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'selector': 'a[id="clickme"]'}
i = click_element.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait.WaitAction(data)
j.RunAction(None, self._tab, i)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testClickWithSingleQuoteSelectorWaitForNavigation(self):
self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'selector': 'a[id=\'clickme\']'}
i = click_element.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait.WaitAction(data)
j.RunAction(None, self._tab, i)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testClickWithTextWaitForRefChange(self):
self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'text': 'Click me'}
i = click_element.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait.WaitAction(data)
j.RunAction(None, self._tab, i)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testClickWithXPathWaitForRefChange(self):
self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'xpath': '//a[@id="clickme"]'}
i = click_element.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait.WaitAction(data)
j.RunAction(None, self._tab, i)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
| bsd-3-clause | 6,434,020,205,041,921,000 | 36.447059 | 72 | 0.698712 | false |
vlegoff/tsunami | src/secondaires/crafting/commandes/__init__.py | 1 | 1676 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant les commandes du module crafting."""
from secondaires.crafting.commandes import guilde
| bsd-3-clause | -5,403,795,600,636,229,000 | 49.787879 | 79 | 0.782816 | false |
pap/nupic | tests/unit/nupic/support/consoleprinter_test/consoleprinter_test.py | 34 | 2887 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import unittest2 as unittest
from nupic.support.consoleprinter import ConsolePrinterMixin, Tee
# Class used for testing
class MyClass(ConsolePrinterMixin):
def __init__(self):
ConsolePrinterMixin.__init__(self)
def run(self):
for i in xrange(0, 4):
self.cPrint(i, "message at level %d", i)
class ConsolePrinterTest(unittest.TestCase):
def testPrint(self):
mydir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.abspath("console_output.txt")
if os.path.exists(filename):
os.remove(filename)
# Capture output to a file so that we can compare it
with Tee(filename):
c1 = MyClass()
print "Running with default verbosity"
c1.run()
print
print "Running with verbosity 2"
c1.consolePrinterVerbosity = 2
c1.run()
print
print "Running with verbosity 0"
c1.consolePrinterVerbosity = 0
c1.run()
print
c1.cPrint(0, "Message %s two %s", "with", "args")
c1.cPrint(0, "Message with no newline", newline=False)
c1.cPrint(0, " Message with newline")
c1.cPrint(0, "Message with %s and %s",
"no newline", "args", newline=False)
c1.cPrint(0, " Message with %s and %s", "newline", "args")
print "Done"
with self.assertRaises(KeyError):
c1.cPrint(0, "Message", badkw="badvalue")
referenceFilename = os.path.join(mydir, "consoleprinter_output.txt")
expected = open(referenceFilename).readlines()
actual = open(filename).readlines()
print ("Comparing files '%s'" % referenceFilename)
print ("and '%s'" % filename)
self.assertEqual(len(expected), len(actual))
for i in xrange(len(expected)):
self.assertEqual(expected[i].strip(), actual[i].strip())
# Clean up
os.remove(filename)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | 3,753,009,007,812,555,000 | 27.303922 | 72 | 0.638379 | false |
ritchyteam/odoo | addons/l10n_fr_hr_payroll/__openerp__.py | 374 | 2165 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'French Payroll',
'category': 'Localization/Payroll',
'author': 'Yannick Buron (SYNERPGY)',
'depends': ['hr_payroll', 'l10n_fr'],
'version': '1.0',
'description': """
French Payroll Rules.
=====================
- Configuration of hr_payroll for French localization
    - All main contribution rules for French payslips, for 'cadre' and 'non-cadre'
- New payslip report
TODO:
-----
- Integration with holidays module for deduction and allowance
- Integration with hr_payroll_account for the automatic account_move_line
creation from the payslip
    - Continue to integrate the contributions. Only the main contributions are
      currently implemented
- Remake the report under webkit
    - The payslip.line with appears_in_payslip = False should appear in the
      payslip interface, but not in the payslip report
""",
'active': False,
'data': [
'l10n_fr_hr_payroll_view.xml',
'l10n_fr_hr_payroll_data.xml',
'views/report_l10nfrfichepaye.xml',
'l10n_fr_hr_payroll_reports.xml',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,053,482,730,837,610,000 | 36.982456 | 82 | 0.630023 | false |
pgleeson/TestArea | templates/clusterUtils/pullallsims.py | 5 | 1714 | # -*- coding: utf-8 -*-
'''
This file can be placed in the simulations directory of a neuroConstruct project.
When run, it searches all subdirectories for time.dat; wherever that file is missing,
it tries running pullsim.sh, which attempts to retrieve the saved data from a remotely
executed simulation.
'''
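# Illustrative layout (directory names are hypothetical): ./Sim_1/pullsim.sh is
# executed only when ./Sim_1/time.dat is missing, i.e. the run has not finished.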
import os
import subprocess
path="."
pullSimFilename = "pullsim.sh"
dirList=os.listdir(path)
for fname in dirList:
if os.path.isdir(fname):
print "\n------ Checking directory: " + fname
timeFile = fname+"/time.dat"
pullsimFile = fname+"/"+pullSimFilename
if os.path.isfile(timeFile):
print "Time file exists! Simulation was successful."
else:
print "Time file doesn't exist!"
if os.path.isfile(pullsimFile):
print pullSimFilename+" exists and will be executed..."
process = subprocess.Popen("cd "+fname+";./"+pullSimFilename, shell=True, stdout=subprocess.PIPE)
stdout_value = process.communicate()[0]
process.wait()
print "Process has finished with return code: "+str(process.returncode)
output = repr(stdout_value)
formatted = output.replace("\\n", "\n\t")
print 'Output from running '+pullSimFilename+':\n\t', formatted
if os.path.isfile(timeFile):
print "Time file %s now exists, and so simulation was successful!"%timeFile
else:
print "Time file doesn't exist! Simulation hasn't successfully finished yet."
else:
print "No "+pullsimFile+", so cannot proceed further..."
| gpl-2.0 | -2,094,117,950,751,211,500 | 33.28 | 113 | 0.607935 | false |
vitan/hue | desktop/core/ext-py/Django-1.6.10/docs/_ext/applyxrefs.py | 132 | 1842 | """Adds xref targets to the top of files."""
import sys
import os
testing = False
DONT_TOUCH = (
'./index.txt',
)
def target_name(fn):
if fn.endswith('.txt'):
fn = fn[:-4]
return '_' + fn.lstrip('./').replace('/', '-')
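# Illustrative example (hypothetical path): target_name('topics/forms.txt')
# returns '_topics-forms', the xref label later written to the top of the file.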
def process_file(fn, lines):
lines.insert(0, '\n')
lines.insert(0, '.. %s:\n' % target_name(fn))
try:
with open(fn, 'w') as fp:
fp.writelines(lines)
except IOError:
print("Can't open %s for writing. Not touching it." % fn)
def has_target(fn):
try:
with open(fn, 'r') as fp:
lines = fp.readlines()
except IOError:
print("Can't open or read %s. Not touching it." % fn)
return (True, None)
#print fn, len(lines)
if len(lines) < 1:
print("Not touching empty file %s." % fn)
return (True, None)
if lines[0].startswith('.. _'):
return (True, None)
return (False, lines)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
argv.extend('.')
files = []
for root in argv[1:]:
for (dirpath, dirnames, filenames) in os.walk(root):
files.extend([(dirpath, f) for f in filenames])
files.sort()
files = [os.path.join(p, fn) for p, fn in files if fn.endswith('.txt')]
#print files
for fn in files:
if fn in DONT_TOUCH:
print("Skipping blacklisted file %s." % fn)
continue
target_found, lines = has_target(fn)
if not target_found:
if testing:
print('%s: %s' % (fn, lines[0]))
else:
print("Adding xref to %s" % fn)
process_file(fn, lines)
else:
print("Skipping %s: already has a xref" % fn)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 5,769,537,809,609,714,000 | 24.232877 | 75 | 0.516287 | false |
gohin/django | django/http/cookie.py | 460 | 4390 | from __future__ import unicode_literals
import sys
from django.utils import six
from django.utils.encoding import force_str
from django.utils.six.moves import http_cookies
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = http_cookies.SimpleCookie()
try:
_tc.load(str('foo:bar=1'))
_cookie_allows_colon_in_names = True
except http_cookies.CookieError:
_cookie_allows_colon_in_names = False
# Cookie pickling bug is fixed in Python 2.7.9 and Python 3.4.3+
# http://bugs.python.org/issue22775
cookie_pickles_properly = (
(sys.version_info[:2] == (2, 7) and sys.version_info >= (2, 7, 9)) or
sys.version_info >= (3, 4, 3)
)
if _cookie_encodes_correctly and _cookie_allows_colon_in_names and cookie_pickles_properly:
SimpleCookie = http_cookies.SimpleCookie
else:
Morsel = http_cookies.Morsel
class SimpleCookie(http_cookies.SimpleCookie):
if not cookie_pickles_properly:
def __setitem__(self, key, value):
# Apply the fix from http://bugs.python.org/issue22775 where
# it's not fixed in Python itself
if isinstance(value, Morsel):
# allow assignment of constructed Morsels (e.g. for pickling)
dict.__setitem__(self, key, value)
else:
super(SimpleCookie, self).__setitem__(key, value)
if not _cookie_encodes_correctly:
def value_encode(self, val):
# Some browsers do not support quoted-string from RFC 2109,
# including some versions of Safari and Internet Explorer.
# These browsers split on ';', and some versions of Safari
# are known to split on ', '. Therefore, we encode ';' and ','
# SimpleCookie already does the hard work of encoding and decoding.
# It uses octal sequences like '\\012' for newline etc.
# and non-ASCII chars. We just make use of this mechanism, to
# avoid introducing two encoding schemes which would be confusing
# and especially awkward for javascript.
# NB, contrary to Python docs, value_encode returns a tuple containing
# (real val, encoded_val)
val, encoded = super(SimpleCookie, self).value_encode(val)
encoded = encoded.replace(";", "\\073").replace(",", "\\054")
# If encoded now contains any quoted chars, we need double quotes
# around the whole string.
if "\\" in encoded and not encoded.startswith('"'):
encoded = '"' + encoded + '"'
return val, encoded
if not _cookie_allows_colon_in_names:
def load(self, rawdata):
self.bad_cookies = set()
if six.PY2 and isinstance(rawdata, six.text_type):
rawdata = force_str(rawdata)
super(SimpleCookie, self).load(rawdata)
for key in self.bad_cookies:
del self[key]
# override private __set() method:
# (needed for using our Morsel, and for laxness with CookieError
def _BaseCookie__set(self, key, real_value, coded_value):
key = force_str(key)
try:
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
except http_cookies.CookieError:
if not hasattr(self, 'bad_cookies'):
self.bad_cookies = set()
self.bad_cookies.add(key)
dict.__setitem__(self, key, http_cookies.Morsel())
def parse_cookie(cookie):
if cookie == '':
return {}
if not isinstance(cookie, http_cookies.BaseCookie):
try:
c = SimpleCookie()
c.load(cookie)
except http_cookies.CookieError:
# Invalid cookie
return {}
else:
c = cookie
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
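# Illustrative usage (made-up cookie values):
# parse_cookie('csrftoken=abc; sessionid=xyz') == {'csrftoken': 'abc', 'sessionid': 'xyz'}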
| bsd-3-clause | 4,626,601,587,904,040,000 | 40.415094 | 94 | 0.572893 | false |
witcxc/libpinyin | scripts/pinyin.py | 2 | 13821 | # -*- coding: utf-8 -*-
# vim:set et sts=4 sw=4:
#
# libpinyin - Library to deal with pinyin.
#
# Copyright (c) 2007-2008 Peng Huang <[email protected]>
# Copyright (C) 2011 Peng Wu <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
N_ = lambda x : x
PINYIN_DICT = {
"a" : 1, "ai" : 2, "an" : 3, "ang" : 4, "ao" : 5,
"ba" : 6, "bai" : 7, "ban" : 8, "bang" : 9, "bao" : 10,
"bei" : 11, "ben" : 12, "beng" : 13, "bi" : 14, "bian" : 15,
"biao" : 16, "bie" : 17, "bin" : 18, "bing" : 19, "bo" : 20,
"bu" : 21, "ca" : 22, "cai" : 23, "can" : 24, "cang" : 25,
"cao" : 26, "ce" : 27, "cen" : 28, "ceng" : 29, "ci" : 30,
"cong" : 31, "cou" : 32, "cu" : 33, "cuan" : 34, "cui" : 35,
"cun" : 36, "cuo" : 37, "cha" : 38, "chai" : 39, "chan" : 40,
"chang" : 41, "chao" : 42, "che" : 43, "chen" : 44, "cheng" : 45,
"chi" : 46, "chong" : 47, "chou" : 48, "chu" : 49, "chuai" : 50,
"chuan" : 51, "chuang" : 52, "chui" : 53, "chun" : 54, "chuo" : 55,
"da" : 56, "dai" : 57, "dan" : 58, "dang" : 59, "dao" : 60,
"de" : 61, "dei" : 62,
# "den" : 63,
"deng" : 64, "di" : 65,
"dia" : 66, "dian" : 67, "diao" : 68, "die" : 69, "ding" : 70,
"diu" : 71, "dong" : 72, "dou" : 73, "du" : 74, "duan" : 75,
"dui" : 76, "dun" : 77, "duo" : 78, "e" : 79, "ei" : 80,
"en" : 81, "er" : 82, "fa" : 83, "fan" : 84, "fang" : 85,
"fei" : 86, "fen" : 87, "feng" : 88, "fo" : 89, "fou" : 90,
"fu" : 91, "ga" : 92, "gai" : 93, "gan" : 94, "gang" : 95,
"gao" : 96, "ge" : 97, "gei" : 98, "gen" : 99, "geng" : 100,
"gong" : 101, "gou" : 102, "gu" : 103, "gua" : 104, "guai" : 105,
"guan" : 106, "guang" : 107, "gui" : 108, "gun" : 109, "guo" : 110,
"ha" : 111, "hai" : 112, "han" : 113, "hang" : 114, "hao" : 115,
"he" : 116, "hei" : 117, "hen" : 118, "heng" : 119, "hong" : 120,
"hou" : 121, "hu" : 122, "hua" : 123, "huai" : 124, "huan" : 125,
"huang" : 126, "hui" : 127, "hun" : 128, "huo" : 129, "ji" : 130,
"jia" : 131, "jian" : 132, "jiang" : 133, "jiao" : 134, "jie" : 135,
"jin" : 136, "jing" : 137, "jiong" : 138, "jiu" : 139, "ju" : 140,
"juan" : 141, "jue" : 142, "jun" : 143, "ka" : 144, "kai" : 145,
"kan" : 146, "kang" : 147, "kao" : 148, "ke" : 149,
# "kei" : 150,
"ken" : 151, "keng" : 152, "kong" : 153, "kou" : 154, "ku" : 155,
"kua" : 156, "kuai" : 157, "kuan" : 158, "kuang" : 159, "kui" : 160,
"kun" : 161, "kuo" : 162, "la" : 163, "lai" : 164, "lan" : 165,
"lang" : 166, "lao" : 167, "le" : 168, "lei" : 169, "leng" : 170,
"li" : 171, "lia" : 172, "lian" : 173, "liang" : 174, "liao" : 175,
"lie" : 176, "lin" : 177, "ling" : 178, "liu" : 179,
"lo" : 180,
"long" : 181, "lou" : 182, "lu" : 183, "luan" : 184,
# "lue" : 185,
"lun" : 186, "luo" : 187, "lv" : 188, "lve" : 189,
"ma" : 190,
"mai" : 191, "man" : 192, "mang" : 193, "mao" : 194, "me" : 195,
"mei" : 196, "men" : 197, "meng" : 198, "mi" : 199, "mian" : 200,
"miao" : 201, "mie" : 202, "min" : 203, "ming" : 204, "miu" : 205,
"mo" : 206, "mou" : 207, "mu" : 208, "na" : 209, "nai" : 210,
"nan" : 211, "nang" : 212, "nao" : 213, "ne" : 214, "nei" : 215,
"nen" : 216, "neng" : 217, "ni" : 218, "nian" : 219, "niang" : 220,
"niao" : 221, "nie" : 222, "nin" : 223, "ning" : 224, "niu" : 225,
"ng" : 226,
"nong" : 227, "nou" : 228, "nu" : 229, "nuan" : 230,
# "nue" : 231,
"nuo" : 232, "nv" : 233, "nve" : 234,
"o" : 235,
"ou" : 236, "pa" : 237, "pai" : 238, "pan" : 239, "pang" : 240,
"pao" : 241, "pei" : 242, "pen" : 243, "peng" : 244, "pi" : 245,
"pian" : 246, "piao" : 247, "pie" : 248, "pin" : 249, "ping" : 250,
"po" : 251, "pou" : 252, "pu" : 253, "qi" : 254, "qia" : 255,
"qian" : 256, "qiang" : 257, "qiao" : 258, "qie" : 259, "qin" : 260,
"qing" : 261, "qiong" : 262, "qiu" : 263, "qu" : 264, "quan" : 265,
"que" : 266, "qun" : 267, "ran" : 268, "rang" : 269, "rao" : 270,
"re" : 271, "ren" : 272, "reng" : 273, "ri" : 274, "rong" : 275,
"rou" : 276, "ru" : 277, "ruan" : 278, "rui" : 279, "run" : 280,
"ruo" : 281, "sa" : 282, "sai" : 283, "san" : 284, "sang" : 285,
"sao" : 286, "se" : 287, "sen" : 288, "seng" : 289, "si" : 290,
"song" : 291, "sou" : 292, "su" : 293, "suan" : 294, "sui" : 295,
"sun" : 296, "suo" : 297, "sha" : 298, "shai" : 299, "shan" : 300,
"shang" : 301, "shao" : 302, "she" : 303, "shei" : 304, "shen" : 305,
"sheng" : 306, "shi" : 307, "shou" : 308, "shu" : 309, "shua" : 310,
"shuai" : 311, "shuan" : 312, "shuang" : 313, "shui" : 314, "shun" : 315,
"shuo" : 316, "ta" : 317, "tai" : 318, "tan" : 319, "tang" : 320,
"tao" : 321, "te" : 322,
# "tei" : 323,
"teng" : 324, "ti" : 325,
"tian" : 326, "tiao" : 327, "tie" : 328, "ting" : 329, "tong" : 330,
"tou" : 331, "tu" : 332, "tuan" : 333, "tui" : 334, "tun" : 335,
"tuo" : 336, "wa" : 337, "wai" : 338, "wan" : 339, "wang" : 340,
"wei" : 341, "wen" : 342, "weng" : 343, "wo" : 344, "wu" : 345,
"xi" : 346, "xia" : 347, "xian" : 348, "xiang" : 349, "xiao" : 350,
"xie" : 351, "xin" : 352, "xing" : 353, "xiong" : 354, "xiu" : 355,
"xu" : 356, "xuan" : 357, "xue" : 358, "xun" : 359, "ya" : 360,
"yan" : 361, "yang" : 362, "yao" : 363, "ye" : 364, "yi" : 365,
"yin" : 366, "ying" : 367, "yo" : 368, "yong" : 369, "you" : 370,
"yu" : 371, "yuan" : 372, "yue" : 373, "yun" : 374, "za" : 375,
"zai" : 376, "zan" : 377, "zang" : 378, "zao" : 379, "ze" : 380,
"zei" : 381, "zen" : 382, "zeng" : 383, "zi" : 384, "zong" : 385,
"zou" : 386, "zu" : 387, "zuan" : 388, "zui" : 389, "zun" : 390,
"zuo" : 391, "zha" : 392, "zhai" : 393, "zhan" : 394, "zhang" : 395,
"zhao" : 396, "zhe" : 397, "zhen" : 398, "zheng" : 399, "zhi" : 400,
"zhong" : 401, "zhou" : 402, "zhu" : 403, "zhua" : 404, "zhuai" : 405,
"zhuan" : 406, "zhuang" : 407, "zhui" : 408, "zhun" : 409, "zhuo" : 410,
# some weird pinyins
#~ "eng" : 411, "chua" : 412, "fe" : 413, "fiao" : 414, "liong" : 415
}
PINYIN_LIST = PINYIN_DICT.keys ()
SHENGMU_DICT = {
"b" : 1, "p" : 2, "m" : 3, "f" : 4, "d" : 5,
"t" : 6, "n" : 7, "l" : 8, "g" : 9, "k" : 10, "h" : 11,
"j" : 12, "q" : 13, "x" : 14, "zh" : 15, "ch" : 16, "sh" : 17,
"r" : 18, "z" : 19, "c" : 20, "s" : 21, "y" : 22, "w" : 23
}
SHENGMU_LIST = SHENGMU_DICT.keys ()
YUNMU_DICT = {
"a" : 1, "ai" : 2, "an" : 3, "ang" : 4, "ao" : 5,
"e" : 6, "ei" : 7, "en" : 8, "eng" : 9, "er" : 10,
"i" : 11, "ia" : 12, "ian" : 13, "iang" : 14, "iao" : 15,
"ie" : 16, "in" : 17, "ing" : 18, "iong" : 19, "iu" : 20,
"o" : 21, "ong" : 22, "ou" : 23, "u" : 24, "ua" : 25,
"uai" : 26, "uan" : 27, "uang" : 28, "ue" : 29, "ui" : 30,
"un" : 31, "uo" : 32, "v" : 33, "ve" : 34
}
YUNMU_LIST = YUNMU_DICT.keys ()
MOHU_SHENGMU = {
"z" : ("z", "zh"),
"zh" : ("z", "zh"),
"c" : ("c", "ch"),
"ch" : ("c", "ch"),
"s" : ("s", "sh"),
"sh" : ("s", "sh"),
"l" : ("l", "n"),
"n" : ("l", "n")
}
MOHU_YUNMU = {
"an" : ("an", "ang"),
"ang" : ("an", "ang"),
"en" : ("en", "eng"),
"eng" : ("en", "eng"),
"in" : ("in", "ing"),
"ing" : ("in", "ing")
}
MSPY_SHUANGPIN_SHENGMU_DICT = {
"b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "ch","j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "sh","v" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
MSPY_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("ou",),
"c" : ("iao",),
"d" : ("uang", "iang"),
"e" : ("e",),
"f" : ("en",),
"g" : ("eng", "ng"),
"h" : ("ang",),
"i" : ("i",),
"j" : ("an",),
"k" : ("ao",),
"l" : ("ai",),
"m" : ("ian",),
"n" : ("in",),
"o" : ("uo", "o"),
"p" : ("un",),
"q" : ("iu",),
"r" : ("uan", "er"),
"s" : ("ong", "iong"),
"t" : ("ue",),
"u" : ("u",),
"v" : ("ui","ue"),
"w" : ("ia","ua"),
"x" : ("ie",),
"y" : ("uai", "v"),
"z" : ("ei",),
";" : ("ing",)
}
ZRM_SHUANGPIN_SHENGMU_DICT = {
"b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "ch","j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "sh","v" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
ZRM_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("ou",),
"c" : ("iao",),
"d" : ("uang", "iang"),
"e" : ("e",),
"f" : ("en",),
"g" : ("eng", "ng"),
"h" : ("ang",),
"i" : ("i",),
"j" : ("an",),
"k" : ("ao",),
"l" : ("ai",),
"m" : ("ian",),
"n" : ("in",),
"o" : ("uo", "o"),
"p" : ("un",),
"q" : ("iu",),
"r" : ("uan", "er"),
"s" : ("ong", "iong"),
"t" : ("ue",),
"u" : ("u",),
"v" : ("ui","v"),
"w" : ("ia","ua"),
"x" : ("ie",),
"y" : ("uai", "ing"),
"z" : ("ei",),
}
ABC_SHUANGPIN_SHENGMU_DICT = {
"a" : "zh", "b" : "b", "c" : "c", "d" : "d", "e":"ch", "f" : "f", "g" : "g",
"h" : "h", "j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "v" : "sh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
ABC_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("ou",),
"c" : ("in","uai"),
"d" : ("ia", "ua"),
"e" : ("e",),
"f" : ("en",),
"g" : ("eng", "ng"),
"h" : ("ang",),
"i" : ("i",),
"j" : ("an",),
"k" : ("ao",),
"l" : ("ai",),
"m" : ("ue","ui"),
"n" : ("un",),
"o" : ("uo", "o"),
"p" : ("uan",),
"q" : ("ei",),
"r" : ("er", "iu"),
"s" : ("ong", "iong"),
"t" : ("iang","uang"),
"u" : ("u",),
"v" : ("v","ue"),
"w" : ("ian",),
"x" : ("ie",),
"y" : ("ing",),
"z" : ("iao",),
}
ZGPY_SHUANGPIN_SHENGMU_DICT = {
"a" : "ch", "b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "sh","j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
ZGPY_SHUANGPIN_YUNMU_DICT = {
"a" : ("a", ),
"b" : ("iao", ),
"d" : ("ie", ),
"e" : ("e", ),
"f" : ("ian", ),
"g" : ("iang", "uang"),
"h" : ("ong", "iong"),
"i" : ("i", ),
"j" : ("er", "iu"),
"k" : ("ei", ),
"l" : ("uan", ),
"m" : ("un", ),
"n" : ("ue", "ui"),
"o" : ("uo", "o"),
"p" : ("ai", ),
"q" : ("ao", ),
"r" : ("an", ),
"s" : ("ang", ),
"t" : ("eng", "ng"),
"u" : ("u", ),
"v" : ("v", ),
"w" : ("en", ),
"x" : ("ia", "ua"),
"y" : ("in", "uai"),
"z" : ("ou" ,),
";" : ("ing", )
}
PYJJ_SHUANGPIN_SHENGMU_DICT = {
"a" : "'", "b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "sh","j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "ch","v" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
PYJJ_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("ia","ua"),
"c" : ("uan",),
"d" : ("ao", ),
"e" : ("e",),
"f" : ("an",),
"g" : ("ang",),
"h" : ("iang","uang"),
"i" : ("i",),
"j" : ("ian",),
"k" : ("iao",),
"l" : ("in",),
"m" : ("ie",),
"n" : ("iu",),
"o" : ("uo", "o"),
"p" : ("ou",),
"q" : ("er","ing"),
"r" : ("en", ),
"s" : ("ai", ),
"t" : ("eng", "ng"),
"u" : ("u",),
"v" : ("v","ui"),
"w" : ("ei",),
"x" : ("uai","ue"),
"y" : ("ong","iong"),
"z" : ("un",),
}
XHE_SHUANGPIN_SHENGMU_DICT = {
"b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "ch", "j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "sh", "v" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z",
"a" : "'", "e" : "'"
}
XHE_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("in",),
"c" : ("ao",),
"d" : ("ai",),
"e" : ("e",),
"f" : ("en",),
"g" : ("eng", "ng"),
"h" : ("ang",),
"i" : ("i",),
"j" : ("an",),
"k" : ("uai", "ing"),
"l" : ("iang", "uang"),
"m" : ("ian",),
"n" : ("iao",),
"o" : ("uo", "o"),
"p" : ("ie",),
"q" : ("iu",),
"r" : ("uan", "er"),
"s" : ("ong", "iong"),
"t" : ("ue",),
"u" : ("u",),
"v" : ("v", "ui"),
"w" : ("ei",),
"x" : ("ia", "ua"),
"y" : ("un",),
"z" : ("ou",),
}
SHUANGPIN_SCHEMAS = {
N_("MSPY") : (MSPY_SHUANGPIN_SHENGMU_DICT, MSPY_SHUANGPIN_YUNMU_DICT),
N_("ZRM") : (ZRM_SHUANGPIN_SHENGMU_DICT, ZRM_SHUANGPIN_YUNMU_DICT),
N_("ABC") : (ABC_SHUANGPIN_SHENGMU_DICT, ABC_SHUANGPIN_YUNMU_DICT),
N_("ZGPY") : (ZGPY_SHUANGPIN_SHENGMU_DICT, ZGPY_SHUANGPIN_YUNMU_DICT),
N_("PYJJ") : (PYJJ_SHUANGPIN_SHENGMU_DICT, PYJJ_SHUANGPIN_YUNMU_DICT),
N_("XHE") : (XHE_SHUANGPIN_SHENGMU_DICT, XHE_SHUANGPIN_YUNMU_DICT),
}
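# Minimal sketch, not part of the original script: expand a two-key shuangpin
# input into full-pinyin candidates using the tables above. The scheme name and
# keys in the example are illustrative only.
def expand_shuangpin(scheme, first_key, second_key):
    shengmu_dict, yunmu_dict = SHUANGPIN_SCHEMAS[scheme]
    shengmu = shengmu_dict.get(first_key, "")
    candidates = []
    for yunmu in yunmu_dict.get(second_key, ()):
        # "'" marks an empty (zero) shengmu in the schema tables
        full = (shengmu if shengmu != "'" else "") + yunmu
        if full in PINYIN_DICT:
            candidates.append(full)
    return candidates
# e.g. expand_shuangpin("MSPY", "v", "d") -> ["zhuang"]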
| gpl-2.0 | 391,503,750,518,905,100 | 33.5525 | 81 | 0.36705 | false |
saimn/astropy | astropy/nddata/_testing.py | 11 | 1605 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing utilities. Not part of the public API!"""
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
def assert_wcs_seem_equal(wcs1, wcs2):
"""Just checks a few attributes to make sure wcs instances seem to be
equal.
"""
if wcs1 is None and wcs2 is None:
return
assert wcs1 is not None
assert wcs2 is not None
if isinstance(wcs1, BaseHighLevelWCS):
wcs1 = wcs1.low_level_wcs
if isinstance(wcs2, BaseHighLevelWCS):
wcs2 = wcs2.low_level_wcs
assert isinstance(wcs1, WCS)
assert isinstance(wcs2, WCS)
if wcs1 is wcs2:
return
assert wcs1.wcs.compare(wcs2.wcs)
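# Illustrative only: the factories below build WCS pairs such that
# assert_wcs_seem_equal(*create_two_equal_wcs(2)) passes, while
# assert_wcs_seem_equal(*create_two_unequal_wcs(2)) raises an AssertionError.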
def _create_wcs_simple(naxis, ctype, crpix, crval, cdelt):
wcs = WCS(naxis=naxis)
wcs.wcs.crpix = crpix
wcs.wcs.crval = crval
wcs.wcs.cdelt = cdelt
wcs.wcs.ctype = ctype
return wcs
def create_two_equal_wcs(naxis):
return [
_create_wcs_simple(
naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis,
crval=[10]*naxis, cdelt=[1]*naxis),
_create_wcs_simple(
naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis,
crval=[10]*naxis, cdelt=[1]*naxis)
]
def create_two_unequal_wcs(naxis):
return [
_create_wcs_simple(
naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis,
crval=[10]*naxis, cdelt=[1]*naxis),
_create_wcs_simple(
naxis=naxis, ctype=["m"]*naxis, crpix=[20]*naxis,
crval=[20]*naxis, cdelt=[2]*naxis),
]
| bsd-3-clause | -7,296,106,230,798,705,000 | 28.181818 | 73 | 0.614953 | false |
mascot6699/Hackapi-Demo | src/core/views.py | 1 | 1412 |
from . import models, serializers, utils
from datetime import datetime, timedelta
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, permissions
class Process(APIView):
"""
These apis are for general purpose
"""
def get(self, request, format=None):
"""
        GET endpoint that handles an incoming SMS callback; expected query parameters:
SmsSid string, unique identifier of that SMS
From string, the number of the sender
To string, your Exotel Company number where the SMS was received
Date string, Time when the SMS reached Exotel's servers
Body string, the contents of the SMS
"""
parsed_content = request.query_params.get("Body").split(' ')
# garbage = parsed_content[0].lower()
keyword= parsed_content[1].lower()
body = (" ".join(parsed_content[2:])).lower()
print body, keyword
if keyword=="hello":
body = utils.get_help()
if keyword=="wiki":
body = utils.process_wiki(body)
elif keyword=="dictionary":
body = utils.process_dictionary(body)
elif keyword=="email":
body = utils.custom_send_email(body)
elif keyword=="song":
body = utils.custom_send_email(body)
return Response(body, status=status.HTTP_200_OK, content_type="text/plain")
| mit | -4,467,069,860,691,169,300 | 31.090909 | 83 | 0.631728 | false |
altaf-ali/luigi | test/scheduler_visualisation_test.py | 49 | 13542 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import os
import tempfile
import time
from helpers import unittest
import luigi
import luigi.notifications
import luigi.scheduler
import luigi.worker
luigi.notifications.DEBUG = True
tempdir = tempfile.mkdtemp()
class DummyTask(luigi.Task):
task_id = luigi.Parameter()
def run(self):
f = self.output().open('w')
f.close()
def output(self):
return luigi.LocalTarget(os.path.join(tempdir, str(self.task_id)))
class FactorTask(luigi.Task):
product = luigi.Parameter()
def requires(self):
for factor in range(2, self.product):
if self.product % factor == 0:
yield FactorTask(factor)
yield FactorTask(self.product // factor)
return
def run(self):
f = self.output().open('w')
f.close()
def output(self):
return luigi.LocalTarget(os.path.join(tempdir, 'luigi_test_factor_%d' % self.product))
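# Illustrative dependency shape (exercised by test_dep_graph_diamond below):
# FactorTask(12) requires FactorTask(2) and FactorTask(6); FactorTask(6) in
# turn requires FactorTask(2) and FactorTask(3).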
class BadReqTask(luigi.Task):
succeed = luigi.BoolParameter()
def requires(self):
assert self.succeed
yield BadReqTask(False)
def run(self):
pass
def complete(self):
return False
class FailingTask(luigi.Task):
task_id = luigi.Parameter()
def run(self):
raise Exception("Error Message")
class SchedulerVisualisationTest(unittest.TestCase):
def setUp(self):
self.scheduler = luigi.scheduler.CentralPlannerScheduler()
def tearDown(self):
pass
def _assert_complete(self, tasks):
for t in tasks:
self.assert_(t.complete())
    def _build(self, tasks, workers=1):
        w = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=workers)
for t in tasks:
w.add(t)
w.run()
w.stop()
def _remote(self):
return self.scheduler
def _test_run(self, workers):
tasks = [DummyTask(i) for i in range(20)]
self._build(tasks, workers=workers)
self._assert_complete(tasks)
def test_graph(self):
start = time.time()
tasks = [DummyTask(task_id=1), DummyTask(task_id=2)]
self._build(tasks)
self._assert_complete(tasks)
end = time.time()
remote = self._remote()
graph = remote.graph()
self.assertEqual(len(graph), 2)
self.assert_(u'DummyTask(task_id=1)' in graph)
d1 = graph[u'DummyTask(task_id=1)']
self.assertEqual(d1[u'status'], u'DONE')
self.assertEqual(d1[u'deps'], [])
self.assertGreaterEqual(d1[u'start_time'], start)
self.assertLessEqual(d1[u'start_time'], end)
d2 = graph[u'DummyTask(task_id=2)']
self.assertEqual(d2[u'status'], u'DONE')
self.assertEqual(d2[u'deps'], [])
self.assertGreaterEqual(d2[u'start_time'], start)
self.assertLessEqual(d2[u'start_time'], end)
def _assert_all_done(self, tasks):
self._assert_all(tasks, u'DONE')
def _assert_all(self, tasks, status):
for task in tasks.values():
self.assertEqual(task[u'status'], status)
def test_dep_graph_single(self):
self._build([FactorTask(1)])
remote = self._remote()
dep_graph = remote.dep_graph('FactorTask(product=1)')
self.assertEqual(len(dep_graph), 1)
self._assert_all_done(dep_graph)
d1 = dep_graph.get(u'FactorTask(product=1)')
self.assertEqual(type(d1), type({}))
self.assertEqual(d1[u'deps'], [])
def test_dep_graph_not_found(self):
self._build([FactorTask(1)])
remote = self._remote()
dep_graph = remote.dep_graph('FactorTask(product=5)')
self.assertEqual(len(dep_graph), 0)
def test_dep_graph_tree(self):
self._build([FactorTask(30)])
remote = self._remote()
dep_graph = remote.dep_graph('FactorTask(product=30)')
self.assertEqual(len(dep_graph), 5)
self._assert_all_done(dep_graph)
d30 = dep_graph[u'FactorTask(product=30)']
self.assertEqual(sorted(d30[u'deps']), [u'FactorTask(product=15)', 'FactorTask(product=2)'])
d2 = dep_graph[u'FactorTask(product=2)']
self.assertEqual(sorted(d2[u'deps']), [])
d15 = dep_graph[u'FactorTask(product=15)']
self.assertEqual(sorted(d15[u'deps']), [u'FactorTask(product=3)', 'FactorTask(product=5)'])
d3 = dep_graph[u'FactorTask(product=3)']
self.assertEqual(sorted(d3[u'deps']), [])
d5 = dep_graph[u'FactorTask(product=5)']
self.assertEqual(sorted(d5[u'deps']), [])
def test_dep_graph_missing_deps(self):
self._build([BadReqTask(True)])
dep_graph = self._remote().dep_graph('BadReqTask(succeed=True)')
self.assertEqual(len(dep_graph), 2)
suc = dep_graph[u'BadReqTask(succeed=True)']
self.assertEqual(suc[u'deps'], [u'BadReqTask(succeed=False)'])
fail = dep_graph[u'BadReqTask(succeed=False)']
self.assertEqual(fail[u'name'], 'BadReqTask')
self.assertEqual(fail[u'params'], {'succeed': 'False'})
self.assertEqual(fail[u'status'], 'UNKNOWN')
def test_dep_graph_diamond(self):
self._build([FactorTask(12)])
remote = self._remote()
dep_graph = remote.dep_graph('FactorTask(product=12)')
self.assertEqual(len(dep_graph), 4)
self._assert_all_done(dep_graph)
d12 = dep_graph[u'FactorTask(product=12)']
self.assertEqual(sorted(d12[u'deps']), [u'FactorTask(product=2)', 'FactorTask(product=6)'])
d6 = dep_graph[u'FactorTask(product=6)']
self.assertEqual(sorted(d6[u'deps']), [u'FactorTask(product=2)', 'FactorTask(product=3)'])
d3 = dep_graph[u'FactorTask(product=3)']
self.assertEqual(sorted(d3[u'deps']), [])
d2 = dep_graph[u'FactorTask(product=2)']
self.assertEqual(sorted(d2[u'deps']), [])
def test_task_list_single(self):
self._build([FactorTask(7)])
remote = self._remote()
tasks_done = remote.task_list('DONE', '')
self.assertEqual(len(tasks_done), 1)
self._assert_all_done(tasks_done)
t7 = tasks_done.get(u'FactorTask(product=7)')
self.assertEqual(type(t7), type({}))
self.assertEqual(remote.task_list('', ''), tasks_done)
self.assertEqual(remote.task_list('FAILED', ''), {})
self.assertEqual(remote.task_list('PENDING', ''), {})
def test_task_list_failed(self):
self._build([FailingTask(8)])
remote = self._remote()
failed = remote.task_list('FAILED', '')
self.assertEqual(len(failed), 1)
f8 = failed.get(u'FailingTask(task_id=8)')
self.assertEqual(f8[u'status'], u'FAILED')
self.assertEqual(remote.task_list('DONE', ''), {})
self.assertEqual(remote.task_list('PENDING', ''), {})
def test_task_list_upstream_status(self):
class A(luigi.ExternalTask):
pass
class B(luigi.ExternalTask):
def complete(self):
return True
class C(luigi.Task):
def requires(self):
return [A(), B()]
class F(luigi.Task):
def run(self):
raise Exception()
class D(luigi.Task):
def requires(self):
return [F()]
class E(luigi.Task):
def requires(self):
return [C(), D()]
self._build([E()])
remote = self._remote()
done = remote.task_list('DONE', '')
self.assertEqual(len(done), 1)
db = done.get('B()')
self.assertEqual(db['status'], 'DONE')
missing_input = remote.task_list('PENDING', 'UPSTREAM_MISSING_INPUT')
self.assertEqual(len(missing_input), 2)
pa = missing_input.get(u'A()')
self.assertEqual(pa['status'], 'PENDING')
self.assertEqual(remote._upstream_status('A()', {}), 'UPSTREAM_MISSING_INPUT')
pc = missing_input.get(u'C()')
self.assertEqual(pc['status'], 'PENDING')
self.assertEqual(remote._upstream_status('C()', {}), 'UPSTREAM_MISSING_INPUT')
upstream_failed = remote.task_list('PENDING', 'UPSTREAM_FAILED')
self.assertEqual(len(upstream_failed), 2)
pe = upstream_failed.get(u'E()')
self.assertEqual(pe['status'], 'PENDING')
self.assertEqual(remote._upstream_status('E()', {}), 'UPSTREAM_FAILED')
pe = upstream_failed.get(u'D()')
self.assertEqual(pe['status'], 'PENDING')
self.assertEqual(remote._upstream_status('D()', {}), 'UPSTREAM_FAILED')
pending = dict(missing_input)
pending.update(upstream_failed)
self.assertEqual(remote.task_list('PENDING', ''), pending)
self.assertEqual(remote.task_list('PENDING', 'UPSTREAM_RUNNING'), {})
failed = remote.task_list('FAILED', '')
self.assertEqual(len(failed), 1)
fd = failed.get('F()')
self.assertEqual(fd['status'], 'FAILED')
all = dict(pending)
all.update(done)
all.update(failed)
self.assertEqual(remote.task_list('', ''), all)
self.assertEqual(remote.task_list('RUNNING', ''), {})
def test_task_search(self):
self._build([FactorTask(8)])
self._build([FailingTask(8)])
remote = self._remote()
all_tasks = remote.task_search('Task')
self.assertEqual(len(all_tasks), 2)
self._assert_all(all_tasks['DONE'], 'DONE')
self._assert_all(all_tasks['FAILED'], 'FAILED')
def test_fetch_error(self):
self._build([FailingTask(8)])
remote = self._remote()
error = remote.fetch_error("FailingTask(task_id=8)")
self.assertEqual(error["taskId"], "FailingTask(task_id=8)")
self.assertTrue("Error Message" in error["error"])
self.assertTrue("Runtime error" in error["error"])
self.assertTrue("Traceback" in error["error"])
def test_inverse_deps(self):
class X(luigi.Task):
pass
class Y(luigi.Task):
def requires(self):
return [X()]
class Z(luigi.Task):
id = luigi.Parameter()
def requires(self):
return [Y()]
class ZZ(luigi.Task):
def requires(self):
return [Z(1), Z(2)]
self._build([ZZ()])
dep_graph = self._remote().inverse_dep_graph('X()')
def assert_has_deps(task_id, deps):
self.assertTrue(task_id in dep_graph, '%s not in dep_graph %s' % (task_id, dep_graph))
task = dep_graph[task_id]
self.assertEqual(sorted(task['deps']), sorted(deps), '%s does not have deps %s' % (task_id, deps))
assert_has_deps('X()', ['Y()'])
assert_has_deps('Y()', ['Z(id=1)', 'Z(id=2)'])
assert_has_deps('Z(id=1)', ['ZZ()'])
assert_has_deps('Z(id=2)', ['ZZ()'])
assert_has_deps('ZZ()', [])
def test_simple_worker_list(self):
class X(luigi.Task):
def run(self):
self._complete = True
def complete(self):
return getattr(self, '_complete', False)
self._build([X()])
workers = self._remote().worker_list()
self.assertEqual(1, len(workers))
worker = workers[0]
self.assertEqual('X()', worker['first_task'])
self.assertEqual(0, worker['num_pending'])
self.assertEqual(0, worker['num_uniques'])
self.assertEqual(0, worker['num_running'])
self.assertEqual(1, worker['workers'])
def test_worker_list_pending_uniques(self):
class X(luigi.Task):
def complete(self):
return False
class Y(X):
def requires(self):
return X()
class Z(Y):
pass
w1 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
w2 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
w1.add(Y())
w2.add(Z())
workers = self._remote().worker_list()
self.assertEqual(2, len(workers))
for worker in workers:
self.assertEqual(2, worker['num_pending'])
self.assertEqual(1, worker['num_uniques'])
self.assertEqual(0, worker['num_running'])
def test_worker_list_running(self):
class X(luigi.Task):
n = luigi.IntParameter()
w = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=3)
w.add(X(0))
w.add(X(1))
w.add(X(2))
w.add(X(3))
w._get_work()
w._get_work()
w._get_work()
workers = self._remote().worker_list()
self.assertEqual(1, len(workers))
worker = workers[0]
self.assertEqual(3, worker['num_running'])
self.assertEqual(1, worker['num_pending'])
self.assertEqual(1, worker['num_uniques'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,280,088,414,123,040,300 | 30.131034 | 110 | 0.583001 | false |
uwevil/namebench | libnamebench/selectors.py | 176 | 3262 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ways to select hostname records to test."""
import math
import random
# When running a weighted distribution, never repeat a domain more than this:
MAX_REPEAT = 3
TYPES = {
'automatic': 'Pick the most appropriate selector type for the data source',
'weighted': 'Chooses based on a weighted distribution, preferring entries in the top of the list',
'random': 'Random selection, including repeats.',
'chunk': 'Chooses a random contiguous segment of entries'
}
def MaxRepeatCount(elements, count):
# Avoid stalling out looking for the one unique choice
if count >= len(elements) * 0.5:
return 2**32
else:
return MAX_REPEAT
def GetTypes():
"""Return a tuple of type names with a description."""
return sorted(TYPES.keys())
def WeightedDistribution(elements, count):
"""Given a set of elements, return a weighted distribution back.
Args:
elements: A list of elements to choose from
count: how many elements to return
Returns:
A random but fairly distributed list of elements of count count.
The distribution is designed to mimic real-world DNS usage. The observed
formula for request popularity was:
522.520776 * math.pow(x, -0.998506)-2
"""
def FindY(x, total):
return total * math.pow(x, -0.408506)
total = len(elements)
picks = []
picked = {}
offset = FindY(total, total)
max_repeat = MaxRepeatCount(elements, count)
attempts = 0
while len(picks) < count:
attempts += 1
# avoid dead-lock
if attempts > (count * 4):
break
x = random.random() * total
y = FindY(x, total) - offset
index = abs(int(y))
if index < total:
if picked.get(index, 0) < max_repeat:
picks.append(elements[index])
picked[index] = picked.get(index, 0) + 1
# print '%s: %s' % (index, elements[index])
return picks
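# A minimal usage sketch: assuming `example_hosts` is ordered from most to
# least popular, WeightedDistribution favours hosts near the head of the list.
def _example_weighted_picks():
  example_hosts = ['host%03d.example.com' % i for i in range(100)]
  return WeightedDistribution(example_hosts, 20)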
def ChunkSelect(elements, count):
"""Return a random count-sized contiguous chunk of elements."""
if len(elements) <= count:
return elements
start = random.randint(0, len(elements) - count)
return elements[start:start + count]
def RandomSelect(elements, count, include_duplicates=False):
"""Randomly select elements, but enforce duplication limits."""
picks = []
picked = {}
if include_duplicates:
max_repeat = 2**32
else:
max_repeat = MaxRepeatCount(elements, count)
attempts = 0
while len(picks) < count:
attempts += 1
# avoid dead-lock
if attempts > (count * 4):
break
choice = random.choice(elements)
if picked.get(choice, 0) < max_repeat:
picks.append(choice)
picked[choice] = picked.get(choice, 0) + 1
return picks
| apache-2.0 | 2,139,029,900,928,761,600 | 28.125 | 102 | 0.686695 | false |
TiVoMaker/boto | tests/integration/ec2/autoscale/test_cert_verification.py | 126 | 1575 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.ec2.autoscale
class AutoscaleCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
autoscale = True
regions = boto.ec2.autoscale.regions()
def sample_service_call(self, conn):
conn.get_all_groups()
| mit | 4,907,623,829,306,504,000 | 39.384615 | 84 | 0.767619 | false |
asutherland/opc-reviewboard | contrib/internal/release.py | 1 | 1785 | #!/usr/bin/env python
#
# Performs a release of Review Board. This can only be run by the core
# developers with release permissions.
#
import os
import re
import subprocess
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from reviewboard import get_package_version, VERSION
PY_VERSIONS = ["2.4", "2.5", "2.6"]
LATEST_PY_VERSION = PY_VERSIONS[-1]
PACKAGE_NAME = 'ReviewBoard'
RELEASES_URL = \
'reviewboard.org:/var/www/downloads.reviewboard.org/' \
'htdocs/releases/%s/%s.%s/' % (PACKAGE_NAME, VERSION[0], VERSION[1])
built_files = []
def execute(cmdline):
print ">>> %s" % cmdline
if os.system(cmdline) != 0:
print "!!! Error invoking command."
sys.exit(1)
def run_setup(target, pyver = LATEST_PY_VERSION):
execute("python%s ./setup.py release %s" % (pyver, target))
def clean():
execute("rm -rf build dist")
def build_targets():
for pyver in PY_VERSIONS:
run_setup("bdist_egg", pyver)
built_files.append("dist/%s-%s-py%s.egg" %
(PACKAGE_NAME, get_package_version(), pyver))
run_setup("sdist")
built_files.append("dist/%s-%s.tar.gz" %
(PACKAGE_NAME, get_package_version()))
def upload_files():
execute("scp %s %s" % (" ".join(built_files), RELEASES_URL))
def tag_release():
execute("git tag release-%s" % get_package_version())
def register_release():
run_setup("register")
def main():
if not os.path.exists("setup.py"):
sys.stderr.write("This must be run from the root of the "
"Review Board tree.\n")
sys.exit(1)
clean()
build_targets()
upload_files()
tag_release()
register_release()
if __name__ == "__main__":
main()
| mit | -4,015,365,322,817,112,600 | 20.768293 | 72 | 0.60056 | false |
sliz1/servo | tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/support/generate-text-emphasis-line-height-tests.py | 829 | 3431 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This script generates tests text-emphasis-line-height-001 ~ 004 except
001z. They test the line height expansion in different directions. This
script outputs a list of all tests it generated in the format of Mozilla
reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-line-height-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis line height, {pos}, {wm}, {tag}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="text emphasis marks should expand the line height like ruby if necessary">
<link rel="match" href="text-emphasis-line-height-{index:03}-ref.html">
<p>Pass if the emphasis marks are {dir} the black line:</p>
{start}試験テスト{end}
'''
REF_FILE = 'text-emphasis-line-height-{:03}-ref.html'
REF_TEMPLATE='''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis line height, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are {dir} the black line:</p>
<div style="line-height: 1; border-{pos}: 1px solid black; writing-mode: {wm}; ruby-position: {posval}"><ruby>試<rt>●</rt>験<rt>●</rt>テ<rt>●</rt>ス<rt>●</rt>ト<rt>●</rt></ruby></div>
'''
STYLE1 = 'line-height: 1; border-{pos}: 1px solid black; ' + \
'writing-mode: {wm}; text-emphasis-position: {posval};'
STYLE2 = 'text-emphasis: circle;'
TAGS = [
# (tag, start, end)
('div', '<div style="{style1}{style2}">', '</div>'),
('span', '<div style="{style1}"><span style="{style2}">', '</span></div>'),
]
POSITIONS = [
# pos, text-emphasis-position, ruby-position,
# writing-modes, dir text
('top', 'over right', 'over',
['horizontal-tb'], 'below'),
('bottom', 'under right', 'under',
['horizontal-tb'], 'over'),
('right', 'over right', 'over',
['vertical-rl', 'vertical-lr'], 'to the left of'),
('left', 'over left', 'under',
['vertical-rl', 'vertical-lr'], 'to the right of'),
]
import string
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("# START tests from {}".format(__file__))
idx = 0
for (pos, emphasis_pos, ruby_pos, wms, dir) in POSITIONS:
idx += 1
ref_file = REF_FILE.format(idx)
content = REF_TEMPLATE.format(pos=pos, dir=dir, wm=wms[0], posval=ruby_pos)
write_file(ref_file, content)
suffix = iter(string.ascii_lowercase)
for wm in wms:
style1 = STYLE1.format(pos=pos, wm=wm, posval=emphasis_pos)
for (tag, start, end) in TAGS:
test_file = TEST_FILE.format(idx, next(suffix))
content = TEST_TEMPLATE.format(
pos=pos, wm=wm, tag=tag, index=idx, dir=dir,
start=start.format(style1=style1, style2=STYLE2), end=end)
write_file(test_file, content)
print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
| mpl-2.0 | 7,694,887,479,603,598,000 | 40.597561 | 213 | 0.634125 | false |
tordans/volunteer_planner | scheduler/management/commands/calculate_volunteer_hours.py | 3 | 1164 | # coding: utf-8
import datetime
from django.core.management.base import BaseCommand
from registration.models import RegistrationProfile
from stats.models import ValueStore
class Command(BaseCommand):
help = 'calculates the total volunteer hours across all registered shifts'
args = ""
option_list = BaseCommand.option_list
def handle(self, *args, **options):
shifts = RegistrationProfile.objects.all() \
.filter(needs__starting_time__lte=datetime.datetime.now()) \
.only('needs__starting_time', 'needs__ending_time')
# .prefetch_related('needs')
total_seconds = 0.0
for shift in shifts:
needs_in_shift = shift.needs.all()
for single_shift in needs_in_shift:
delta = single_shift.ending_time - single_shift.starting_time
total_seconds += delta.total_seconds()
total_hours = int(total_seconds) / 3600
value_object, created = ValueStore.objects.get_or_create(
name="total-volunteer-hours", defaults=dict(value=total_hours))
if not created:
value_object.value = total_hours
value_object.save()
| agpl-3.0 | 6,819,728,927,408,078,000 | 32.257143 | 77 | 0.636598 | false |
syedjafri/ThinkStats2 | code/chap02soln.py | 69 | 2263 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
from operator import itemgetter
import first
import thinkstats2
def Mode(hist):
"""Returns the value with the highest frequency.
hist: Hist object
returns: value from Hist
"""
p, x = max([(p, x) for x, p in hist.Items()])
return x
def AllModes(hist):
"""Returns value-freq pairs in decreasing order of frequency.
hist: Hist object
returns: iterator of value-freq pairs
"""
return sorted(hist.Items(), key=itemgetter(1), reverse=True)
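# A minimal sketch (made-up values) showing how Mode and AllModes are used:
def ModeExample():
    """Demonstrates Mode and AllModes on a small, made-up Hist."""
    hist = thinkstats2.Hist([1, 2, 2, 3])
    print('Mode of small hist', Mode(hist))  # -> 2
    print('All modes', AllModes(hist))  # (value, freq) pairs, frequency-descending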
def WeightDifference(live, firsts, others):
"""Explore the difference in weight between first babies and others.
live: DataFrame of all live births
firsts: DataFrame of first babies
others: DataFrame of others
"""
mean0 = live.totalwgt_lb.mean()
mean1 = firsts.totalwgt_lb.mean()
mean2 = others.totalwgt_lb.mean()
var1 = firsts.totalwgt_lb.var()
var2 = others.totalwgt_lb.var()
print('Mean')
print('First babies', mean1)
print('Others', mean2)
print('Variance')
print('First babies', var1)
print('Others', var2)
print('Difference in lbs', mean1 - mean2)
print('Difference in oz', (mean1 - mean2) * 16)
print('Difference relative to mean (%age points)',
(mean1 - mean2) / mean0 * 100)
d = thinkstats2.CohenEffectSize(firsts.totalwgt_lb, others.totalwgt_lb)
print('Cohen d', d)
def main(script):
"""Tests the functions in this module.
script: string script name
"""
live, firsts, others = first.MakeFrames()
hist = thinkstats2.Hist(live.prglngth)
# explore the weight difference between first babies and others
WeightDifference(live, firsts, others)
# test Mode
mode = Mode(hist)
print('Mode of preg length', mode)
assert(mode == 39)
# test AllModes
modes = AllModes(hist)
assert(modes[0][1] == 4693)
for value, freq in modes[:5]:
print(value, freq)
print('%s: All tests passed.' % script)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 | -2,958,441,997,668,105,700 | 22.329897 | 75 | 0.653115 | false |
guilhermegm/django-tastypie | tests/core/tests/paginator.py | 25 | 11701 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase
from tastypie.exceptions import BadRequest
from tastypie.paginator import Paginator
from core.models import Note
from core.tests.resources import NoteResource
from django.db import reset_queries
from django.http import QueryDict
class PaginatorTestCase(TestCase):
fixtures = ['note_testdata.json']
def setUp(self):
super(PaginatorTestCase, self).setUp()
self.data_set = Note.objects.all()
self.old_debug = settings.DEBUG
settings.DEBUG = True
def tearDown(self):
settings.DEBUG = self.old_debug
super(PaginatorTestCase, self).tearDown()
def _get_query_count(self):
try:
from django.db import connections
return connections['default'].queries
except ImportError:
from django.db import connection
return connection.queries
def test_page1(self):
reset_queries()
self.assertEqual(len(self._get_query_count()), 0)
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=0)
# REGRESSION: Check to make sure only part of the cache is full.
# We used to run ``len()`` on the ``QuerySet``, which would populate
# the entire result set. Owwie.
paginator.get_count()
self.assertEqual(len(self._get_query_count()), 1)
# Should be nothing in the cache.
self.assertEqual(paginator.objects._result_cache, None)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=2' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_page2(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_page3(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=4)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 4)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=2' in meta['previous'])
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_page2_with_request(self):
for req in [{'offset' : '2', 'limit' : '2'}, QueryDict('offset=2&limit=2')]:
paginator = Paginator(req, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_page3_with_request(self):
for req in [{'offset' : '4', 'limit' : '2'}, QueryDict('offset=4&limit=2')]:
paginator = Paginator(req, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=4)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 4)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=2' in meta['previous'])
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_large_limit(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=20, offset=0)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 20)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_all(self):
paginator = Paginator({'limit': 0}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=0)
page = paginator.page()
meta = page['meta']
self.assertEqual(meta['limit'], 1000)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['total_count'], 6)
self.assertEqual(len(page['objects']), 6)
def test_complex_get(self):
request = {
'slug__startswith': 'food',
'format': 'json',
}
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('slug__startswith=food' in meta['previous'])
self.assertTrue('format=json' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('slug__startswith=food' in meta['next'])
self.assertTrue('format=json' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_limit(self):
paginator = Paginator({}, self.data_set, limit=20, offset=0)
paginator.limit = '10'
self.assertEqual(paginator.get_limit(), 10)
paginator.limit = None
self.assertEqual(paginator.get_limit(), 20)
paginator.limit = 10
self.assertEqual(paginator.get_limit(), 10)
paginator.limit = -10
raised = False
try:
paginator.get_limit()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid limit '-10' provided. Please provide a positive integer >= 0.")
paginator.limit = 'hAI!'
raised = False
try:
paginator.get_limit()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid limit 'hAI!' provided. Please provide a positive integer.")
# Test the max_limit.
paginator.limit = 1000
self.assertEqual(paginator.get_limit(), 1000)
paginator.limit = 1001
self.assertEqual(paginator.get_limit(), 1000)
paginator = Paginator({}, self.data_set, limit=20, offset=0, max_limit=10)
self.assertEqual(paginator.get_limit(), 10)
def test_offset(self):
paginator = Paginator({}, self.data_set, limit=20, offset=0)
paginator.offset = '10'
self.assertEqual(paginator.get_offset(), 10)
paginator.offset = 0
self.assertEqual(paginator.get_offset(), 0)
paginator.offset = 10
self.assertEqual(paginator.get_offset(), 10)
paginator.offset= -10
raised = False
try:
paginator.get_offset()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid offset '-10' provided. Please provide a positive integer >= 0.")
paginator.offset = 'hAI!'
raised = False
try:
paginator.get_offset()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid offset 'hAI!' provided. Please provide an integer.")
def test_regression_nonqueryset(self):
paginator = Paginator({}, ['foo', 'bar', 'baz'], limit=2, offset=0)
# This would fail due to ``count`` being present on ``list`` but called
# differently.
page = paginator.page()
self.assertEqual(page['objects'], ['foo', 'bar'])
def test_unicode_request(self):
request = {
'slug__startswith': u'☃',
'format': 'json',
}
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['previous'])
self.assertTrue('format=json' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['next'])
self.assertTrue('format=json' in meta['next'])
self.assertEqual(meta['total_count'], 6)
request = QueryDict('slug__startswith=☃&format=json')
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['previous'])
self.assertTrue('format=json' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['next'])
self.assertTrue('format=json' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_custom_collection_name(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=20, offset=0, collection_name='notes')
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 20)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
self.assertEqual(len(paginator.page()['notes']), 6)
def test_multiple(self):
request = QueryDict('a=1&a=2')
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('a=1' in meta['previous'])
self.assertTrue('a=2' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('a=1' in meta['next'])
self.assertTrue('a=2' in meta['next'])
def test_max_limit(self):
paginator = Paginator({'limit': 0}, self.data_set, max_limit=10,
resource_uri='/api/v1/notes/')
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 10)
def test_max_limit_none(self):
paginator = Paginator({'limit': 0}, self.data_set, max_limit=None,
resource_uri='/api/v1/notes/')
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 0)
| bsd-3-clause | 7,914,788,198,693,054,000 | 40.332155 | 124 | 0.600239 | false |
wroersma/volatility | volatility/scan.py | 14 | 6848 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# Derived from source in PyFlag developed by:
# Copyright 2004: Commonwealth of Australia.
# Michael Cohen <[email protected]>
# David Collett <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
# Special thanks to Michael Cohen for ideas and comments!
#
#pylint: disable-msg=C0111
"""
@author: AAron Walters
@license: GNU General Public License 2.0
@contact: [email protected]
@organization: Volatility Foundation
"""
import volatility.debug as debug
import volatility.registry as registry
import volatility.addrspace as addrspace
import volatility.constants as constants
import volatility.conf as conf
########### Following is the new implementation of the scanning
########### framework. The old framework was based on PyFlag's
########### scanning framework which is probably too complex for this.
class BaseScanner(object):
""" A more thorough scanner which checks every byte """
checks = []
def __init__(self, window_size = 8):
self.buffer = addrspace.BufferAddressSpace(conf.DummyConfig(), data = '\x00' * 1024)
self.window_size = window_size
self.constraints = []
self.error_count = 0
def check_addr(self, found):
""" This calls all our constraints on the offset found and
returns the number of contraints that matched.
We shortcut the loop as soon as its obvious that there will
not be sufficient matches to fit the criteria. This allows for
an early exit and a speed boost.
"""
cnt = 0
for check in self.constraints:
## constraints can raise for an error
try:
val = check.check(found)
except Exception:
debug.b()
val = False
if not val:
cnt = cnt + 1
if cnt > self.error_count:
return False
return True
overlap = 20
def scan(self, address_space, offset = 0, maxlen = None):
self.buffer.profile = address_space.profile
current_offset = offset
## Build our constraints from the specified ScannerCheck
## classes:
self.constraints = []
for class_name, args in self.checks:
check = registry.get_plugin_classes(ScannerCheck)[class_name](self.buffer, **args)
self.constraints.append(check)
## Which checks also have skippers?
skippers = [ c for c in self.constraints if hasattr(c, "skip") ]
for (range_start, range_size) in sorted(address_space.get_available_addresses()):
# Jump to the next available point to scan from
# self.base_offset jumps up to be at least range_start
current_offset = max(range_start, current_offset)
range_end = range_start + range_size
# If we have a maximum length, we make sure it's less than the range_end
if maxlen:
range_end = min(range_end, offset + maxlen)
while (current_offset < range_end):
# We've now got range_start <= self.base_offset < range_end
# Figure out how much data to read
l = min(constants.SCAN_BLOCKSIZE + self.overlap, range_end - current_offset)
# Populate the buffer with data
# We use zread to scan what we can because there are often invalid
# pages in the DTB
data = address_space.zread(current_offset, l)
self.buffer.assign_buffer(data, current_offset)
## Run checks throughout this block of data
i = 0
while i < l:
if self.check_addr(i + current_offset):
## yield the offset to the start of the memory
## (after the pool tag)
yield i + current_offset
## Where should we go next? By default we go 1 byte
## ahead, but if some of the checkers have skippers,
## we may actually go much farther. Checkers with
## skippers basically tell us that there is no way
## they can match anything before the skipped result,
## so there is no point in trying them on all the data
## in between. This optimization is useful to really
## speed things up. FIXME - currently skippers assume
## that the check must match, therefore we can skip
## the unmatchable region, but its possible that a
## scanner needs to match only some checkers.
skip = 1
for s in skippers:
skip = max(skip, s.skip(data, i))
i += skip
current_offset += min(constants.SCAN_BLOCKSIZE, l)
class DiscontigScanner(BaseScanner):
def scan(self, address_space, offset = 0, maxlen = None):
debug.warning("DiscontigScanner has been deprecated, all functionality is now contained in BaseScanner")
for match in BaseScanner.scan(self, address_space, offset, maxlen):
yield match
class ScannerCheck(object):
""" A scanner check is a special class which is invoked on an AS to check for a specific condition.
The main method is check(self, offset), which returns True if the
condition holds at the given offset and False otherwise.
This class is the base class for all checks.
"""
def __init__(self, address_space, **_kwargs):
self.address_space = address_space
def object_offset(self, offset, address_space):
return offset
def check(self, _offset):
return False
## If you want to speed up the scanning define this method - it
## will be used to skip the data which is obviously not going to
## match. You will need to return the number of bytes from offset
## to skip to. We take the maximum number of bytes to guarantee
## that all checks have a chance of passing.
#def skip(self, data, offset):
# return -1
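# A minimal illustrative subclass (the signature and skip distances are
# assumptions for the example, not part of the framework): it matches a fixed
# byte pattern and skips straight to the next possible match.
class ExampleSignatureCheck(ScannerCheck):
    """ Looks for a hard-coded byte signature in the address space """
    signature = "MZ"
    def check(self, offset):
        return self.address_space.read(offset, len(self.signature)) == self.signature
    def skip(self, data, offset):
        nextmatch = data.find(self.signature, offset + 1)
        if nextmatch >= 0:
            return nextmatch - offset
        return len(data) - offset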
| gpl-2.0 | -1,684,165,884,424,054,000 | 38.583815 | 112 | 0.618867 | false |
hasecbinusr/pysal | pysal/esda/tests/test_geary.py | 5 | 2997 | """Geary Unittest."""
import unittest
from ... import open as popen
from ... import examples
from .. import geary
import numpy as np
from ...common import pandas
PANDAS_EXTINCT = pandas is None
class Geary_Tester(unittest.TestCase):
"""Geary class for unit tests."""
def setUp(self):
self.w = popen(examples.get_path("book.gal")).read()
f = popen(examples.get_path("book.txt"))
self.y = np.array(f.by_col['y'])
def test_Geary(self):
c = geary.Geary(self.y, self.w, permutations=0)
self.assertAlmostEquals(c.C, 0.33301083591331254)
self.assertAlmostEquals(c.EC, 1.0)
self.assertAlmostEquals(c.VC_norm, 0.031805300245097874)
self.assertAlmostEquals(c.p_norm, 9.2018240680169505e-05)
self.assertAlmostEquals(c.z_norm, -3.7399778367629564)
self.assertAlmostEquals(c.seC_norm, 0.17834040553138225)
self.assertAlmostEquals(c.VC_rand, 0.018437747611029367)
self.assertAlmostEquals(c.p_rand, 4.5059156794646782e-07)
self.assertAlmostEquals(c.z_rand, -4.9120733751216008)
self.assertAlmostEquals(c.seC_rand, 0.13578566791465646)
np.random.seed(12345)
c = geary.Geary(self.y, self.w, permutations=999)
self.assertAlmostEquals(c.C, 0.33301083591331254)
self.assertAlmostEquals(c.EC, 1.0)
self.assertAlmostEquals(c.VC_norm, 0.031805300245097874)
self.assertAlmostEquals(c.p_norm, 9.2018240680169505e-05)
self.assertAlmostEquals(c.z_norm, -3.7399778367629564)
self.assertAlmostEquals(c.seC_norm, 0.17834040553138225)
self.assertAlmostEquals(c.VC_rand, 0.018437747611029367)
self.assertAlmostEquals(c.p_rand, 4.5059156794646782e-07)
self.assertAlmostEquals(c.z_rand, -4.9120733751216008)
self.assertAlmostEquals(c.seC_rand, 0.13578566791465646)
self.assertAlmostEquals(c.EC_sim, 0.9980676303238214)
self.assertAlmostEquals(c.VC_sim, 0.034430408799858946)
self.assertAlmostEquals(c.p_sim, 0.001)
self.assertAlmostEquals(c.p_z_sim, 0.00016908100514811952)
self.assertAlmostEquals(c.z_sim, -3.5841621159171746)
self.assertAlmostEquals(c.seC_sim, 0.18555432843202269)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns=['y'])
r1 = geary.Geary.by_col(df, ['y'], w=self.w, permutations=999)
this_geary = np.unique(r1.y_geary.values)
this_pval = np.unique(r1.y_p_sim.values)
np.random.seed(12345)
c = geary.Geary(self.y, self.w, permutations=999)
self.assertAlmostEquals(this_geary, c.C)
self.assertAlmostEquals(this_pval, c.p_sim)
suite = unittest.TestSuite()
test_classes = [Geary_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause | 7,710,925,304,270,476,000 | 37.423077 | 70 | 0.680347 | false |
peterbarker/ardupilot-1 | Tools/LogAnalyzer/DataflashLog.py | 17 | 29790 | #
# Code to abstract the parsing of APM Dataflash log files, currently only used by the LogAnalyzer
#
# Initial code by Andrew Chapman ([email protected]), 16th Jan 2014
#
from __future__ import print_function
import collections
import os
import numpy
import bisect
import sys
import ctypes
from VehicleType import VehicleType, VehicleTypeString
class Format(object):
'''Data channel format as specified by the FMT lines in the log file'''
def __init__(self,msgType,msgLen,name,types,labels):
self.NAME = 'FMT'
self.msgType = msgType
self.msgLen = msgLen
self.name = name
self.types = types
self.labels = labels.split(',')
def __str__(self):
return "%8s %s" % (self.name, `self.labels`)
@staticmethod
def trycastToFormatType(value,valueType):
'''using format characters from libraries/DataFlash/DataFlash.h to cast strings to basic python int/float/string types
tries a cast and, if it fails, returns the original value unchanged; this is acceptable because text logs
do not always match the declared format, e.g. MODE is declared as an int but may contain a string'''
try:
if valueType in "fcCeELd":
return float(value)
elif valueType in "bBhHiIMQq":
return int(value)
elif valueType in "nNZ":
return str(value)
except:
pass
return value
def to_class(self):
members = dict(
NAME = self.name,
labels = self.labels[:],
)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels[:]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
p = property(lambda x:getattr(x, attributename),
lambda x, v:setattr(x,attributename, Format.trycastToFormatType(v,format)))
members[propertyname] = p
members[attributename] = None
createproperty(label, _type)
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,'_'+k)) for k in x.labels]))
def init(a, *x):
if len(x) != len(a.labels):
raise ValueError("Invalid Length")
#print(list(zip(a.labels, x)))
for (l,v) in zip(a.labels, x):
try:
setattr(a, l, v)
except Exception as e:
print("{} {} {} failed".format(a,l,v))
print(e)
members['__init__'] = init
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(object,),
members
)
#print(members)
return cls
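# For example (numeric values assumed), a text-log line such as
#   FMT, 2, 23, PARM, Nf, Name,Value
# is parsed into Format('2', '23', 'PARM', 'Nf', 'Name,Value'), and to_class()
# then returns a Log__PARM class whose instances expose .Name (str) and
# .Value (float) via trycastToFormatType.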
class logheader(ctypes.LittleEndianStructure):
_fields_ = [ \
('head1', ctypes.c_uint8),
('head2', ctypes.c_uint8),
('msgid', ctypes.c_uint8),
]
def __repr__(self):
return "<logheader head1=0x{self.head1:x} head2=0x{self.head2:x} msgid=0x{self.msgid:x} ({self.msgid})>".format(self=self)
class BinaryFormat(ctypes.LittleEndianStructure):
NAME = 'FMT'
MSG = 128
SIZE = 0
FIELD_FORMAT = {
'b': ctypes.c_int8,
'B': ctypes.c_uint8,
'h': ctypes.c_int16,
'H': ctypes.c_uint16,
'i': ctypes.c_int32,
'I': ctypes.c_uint32,
'f': ctypes.c_float,
'd': ctypes.c_double,
'n': ctypes.c_char * 4,
'N': ctypes.c_char * 16,
'Z': ctypes.c_char * 64,
'c': ctypes.c_int16,# * 100,
'C': ctypes.c_uint16,# * 100,
'e': ctypes.c_int32,# * 100,
'E': ctypes.c_uint32,# * 100,
'L': ctypes.c_int32,
'M': ctypes.c_uint8,
'q': ctypes.c_int64,
'Q': ctypes.c_uint64,
}
FIELD_SCALE = {
'c': 100,
'C': 100,
'e': 100,
'E': 100,
}
_packed_ = True
_fields_ = [ \
('head', logheader),
('type', ctypes.c_uint8),
('length', ctypes.c_uint8),
('name', ctypes.c_char * 4),
('types', ctypes.c_char * 16),
('labels', ctypes.c_char * 64),
]
def __repr__(self):
return "<{cls} {data}>".format(cls=self.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(self,k)) for (k,_) in self._fields_[1:]]))
def to_class(self):
members = dict(
NAME = self.name,
MSG = self.type,
SIZE = self.length,
labels = self.labels.split(",") if self.labels else [],
_pack_ = True)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels.split(",")
if self.labels and (len(fieldtypes) != len(fieldlabels)):
print("Broken FMT message for {} .. ignoring".format(self.name), file=sys.stderr)
return None
fields = [('head',logheader)]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
scale = BinaryFormat.FIELD_SCALE.get(format, None)
p = property(lambda x:getattr(x, attributename))
if scale is not None:
p = property(lambda x:getattr(x, attributename) / scale)
members[propertyname] = p
try:
fields.append((attributename, BinaryFormat.FIELD_FORMAT[format]))
except KeyError:
print('ERROR: Failed to add FMT type: {}, with format: {}'.format(attributename, format))
raise
createproperty(label, _type)
members['_fields_'] = fields
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,k)) for k in x.labels]))
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(ctypes.LittleEndianStructure,),
members
)
if ctypes.sizeof(cls) != cls.SIZE:
print("size mismatch for {} expected {} got {}".format(cls, ctypes.sizeof(cls), cls.SIZE), file=sys.stderr)
# for i in cls.labels:
# print("{} = {}".format(i,getattr(cls,'_'+i)))
return None
return cls
BinaryFormat.SIZE = ctypes.sizeof(BinaryFormat)
class Channel(object):
'''storage for a single stream of data, i.e. all GPS.RelAlt values'''
# TODO: rethink data storage, but do more thorough regression testing before refactoring it
# TODO: store data as a scipy spline curve so we can more easily interpolate and sample the slope?
def __init__(self):
self.dictData = {} # dict of linenum->value # store dupe data in dict and list for now, until we decide which is the better way to go
self.listData = [] # list of (linenum,value) # store dupe data in dict and list for now, until we decide which is the better way to go
def getSegment(self, startLine, endLine):
'''returns a segment of this data (from startLine to endLine, inclusive) as a new Channel instance'''
segment = Channel()
segment.dictData = {k:v for k,v in self.dictData.iteritems() if k >= startLine and k <= endLine}
return segment
def min(self):
return min(self.dictData.values())
def max(self):
return max(self.dictData.values())
def avg(self):
return numpy.mean(self.dictData.values())
def getNearestValueFwd(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
while index<len(self.listData):
line = self.listData[index][0]
#print "Looking forwards for nearest value to line number %d, starting at line %d" % (lineNumber,line) # TEMP
if line >= lineNumber:
return (self.listData[index][1],line)
index += 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValueBack(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999)) - 1
while index>=0:
line = self.listData[index][0]
#print "Looking backwards for nearest value to line number %d, starting at line %d" % (lineNumber,line) # TEMP
if line <= lineNumber:
return (self.listData[index][1],line)
index -= 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValue(self, lineNumber, lookForwards=True):
'''find the nearest data value to the given lineNumber, defaults to first looking forwards. Returns (value,lineNumber)'''
if lookForwards:
try:
return self.getNearestValueFwd(lineNumber)
except:
return self.getNearestValueBack(lineNumber)
else:
try:
return self.getNearestValueBack(lineNumber)
except:
return self.getNearestValueFwd(lineNumber)
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getInterpolatedValue(self, lineNumber):
(prevValue,prevValueLine) = self.getNearestValue(lineNumber, lookForwards=False)
(nextValue,nextValueLine) = self.getNearestValue(lineNumber, lookForwards=True)
if prevValueLine == nextValueLine:
return prevValue
weight = (lineNumber-prevValueLine) / float(nextValueLine-prevValueLine)
return ((weight*prevValue) + ((1-weight)*nextValue))
def getIndexOf(self, lineNumber):
'''returns the index within this channel's listData of the given lineNumber, or raises an Exception if not found'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
#print "INDEX of line %d: %d" % (lineNumber,index)
#print "self.listData[index][0]: %d" % self.listData[index][0]
if (self.listData[index][0] == lineNumber):
return index
else:
raise Exception("Error finding index for line %d" % lineNumber)
class LogIterator:
'''Smart iterator that can move through a log by line number and maintain an index into the nearest values of all data channels'''
# TODO: LogIterator currently indexes the next available value rather than the nearest value, we should make it configurable between next/nearest
class LogIteratorSubValue:
'''syntactic sugar to allow access by LogIterator[lineLabel][dataLabel]'''
logdata = None
iterators = None
lineLabel = None
def __init__(self, logdata, iterators, lineLabel):
self.logdata = logdata
self.lineLabel = lineLabel
self.iterators = iterators
def __getitem__(self, dataLabel):
index = self.iterators[self.lineLabel][0]
return self.logdata.channels[self.lineLabel][dataLabel].listData[index][1]
iterators = {} # lineLabel -> (listIndex,lineNumber)
logdata = None
currentLine = None
def __init__(self, logdata, lineNumber=0):
self.logdata = logdata
self.currentLine = lineNumber
for lineLabel in self.logdata.formats:
if lineLabel in self.logdata.channels:
self.iterators[lineLabel] = ()
self.jump(lineNumber)
def __iter__(self):
return self
def __getitem__(self, lineLabel):
return LogIterator.LogIteratorSubValue(self.logdata, self.iterators, lineLabel)
def next(self):
'''increment iterator to next log line'''
self.currentLine += 1
if self.currentLine > self.logdata.lineCount:
return self
for lineLabel in self.iterators.keys():
# check if the currentLine has gone past our the line we're pointing to for this type of data
dataLabel = self.logdata.formats[lineLabel].labels[0]
(index, lineNumber) = self.iterators[lineLabel]
# if so, and it is not the last entry in the log, then increment the indices for all dataLabels under that lineLabel
if (self.currentLine > lineNumber) and (index < len(self.logdata.channels[lineLabel][dataLabel].listData)-1):
index += 1
lineNumber = self.logdata.channels[lineLabel][dataLabel].listData[index][0]
self.iterators[lineLabel] = (index,lineNumber)
return self
def jump(self, lineNumber):
'''jump iterator to specified log line'''
self.currentLine = lineNumber
for lineLabel in self.iterators.keys():
dataLabel = self.logdata.formats[lineLabel].labels[0]
(value,lineNumber) = self.logdata.channels[lineLabel][dataLabel].getNearestValue(self.currentLine)
self.iterators[lineLabel] = (self.logdata.channels[lineLabel][dataLabel].getIndexOf(lineNumber), lineNumber)
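# A minimal usage sketch (the ATT group and Roll field are assumed to exist in
# the parsed log):
#   it = LogIterator(logdata, lineNumber=1000)
#   roll = it['ATT']['Roll']   # nearest ATT.Roll value for the current line
#   it.next()                  # advance one log line, updating all indices
#   it.jump(5000)              # reposition the iterator at line 5000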
class DataflashLogHelper:
'''helper functions for dealing with log data, put here to keep DataflashLog class as a simple parser and data store'''
@staticmethod
def getTimeAtLine(logdata, lineNumber):
'''returns the nearest GPS timestamp in milliseconds after the given line number'''
if not "GPS" in logdata.channels:
raise Exception("no GPS log data found")
# older logs use 'TIme', newer logs use 'TimeMS'
timeLabel = "TimeMS"
if "Time" in logdata.channels["GPS"]:
timeLabel = "Time"
while lineNumber <= logdata.lineCount:
if lineNumber in logdata.channels["GPS"][timeLabel].dictData:
return logdata.channels["GPS"][timeLabel].dictData[lineNumber]
lineNumber = lineNumber + 1
sys.stderr.write("didn't find GPS data for " + str(lineNumber) + " - using maxtime\n")
return logdata.channels["GPS"][timeLabel].max()
@staticmethod
def findLoiterChunks(logdata, minLengthSeconds=0, noRCInputs=True):
'''returns a list of (startLine,endLine) pairs defining sections of the log which are in loiter mode. Ordered from longest to shortest in time. If noRCInputs == True it only returns chunks with no control inputs'''
# TODO: implement noRCInputs handling when identifying stable loiter chunks, for now we're ignoring it
def chunkSizeCompare(chunk1, chunk2):
chunk1Len = chunk1[1]-chunk1[0]
chunk2Len = chunk2[1]-chunk2[0]
if chunk1Len == chunk2Len:
return 0
elif chunk1Len > chunk2Len:
return -1
else:
return 1
od = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
chunks = []
for i in range(len(od.keys())):
if od.values()[i][0] == "LOITER":
startLine = od.keys()[i]
endLine = None
if i == len(od.keys())-1:
endLine = logdata.lineCount
else:
endLine = od.keys()[i+1]-1
chunkTimeSeconds = (DataflashLogHelper.getTimeAtLine(logdata,endLine)-DataflashLogHelper.getTimeAtLine(logdata,startLine)+1) / 1000.0
if chunkTimeSeconds > minLengthSeconds:
chunks.append((startLine,endLine))
#print "LOITER chunk: %d to %d, %d lines" % (startLine,endLine,endLine-startLine+1)
#print " (time %d to %d, %d seconds)" % (DataflashLogHelper.getTimeAtLine(logdata,startLine), DataflashLogHelper.getTimeAtLine(logdata,endLine), chunkTimeSeconds)
chunks.sort(chunkSizeCompare)
return chunks
@staticmethod
def isLogEmpty(logdata):
'''returns an human readable error string if the log is essentially empty, otherwise returns None'''
# naive check for now, see if the throttle output was ever above 20%
throttleThreshold = 20
if logdata.vehicleType == VehicleType.Copter:
throttleThreshold = 200 # copter uses 0-1000, plane+rover use 0-100
if "CTUN" in logdata.channels:
try:
maxThrottle = logdata.channels["CTUN"]["ThrOut"].max()
except KeyError as e:
# ThrOut was shortened to ThO at some stage...
maxThrottle = logdata.channels["CTUN"]["ThO"].max()
# at roughly the same time ThO became a range from 0 to 1
throttleThreshold = 0.2
if maxThrottle < throttleThreshold:
return "Throttle never above 20%"
return None
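# A minimal usage sketch (log file name assumed), using the DataflashLog class
# defined below:
#   logdata = DataflashLog("flight.log")
#   print(logdata.vehicleTypeString, logdata.firmwareVersion, logdata.durationSecs)
#   for (startLine, endLine) in DataflashLogHelper.findLoiterChunks(logdata, minLengthSeconds=10):
#       print("LOITER chunk from line %d to %d" % (startLine, endLine))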
class DataflashLog(object):
'''APM Dataflash log file reader and container class. Keep this simple, add more advanced or specific functions to DataflashLogHelper class'''
knownHardwareTypes = ["APM", "PX4", "MPNG"]
intTypes = "bBhHiIM"
floatTypes = "fcCeEL"
charTypes = "nNZ"
def __init__(self, logfile=None, format="auto", ignoreBadlines=False):
self.filename = None
self.vehicleType = None # from VehicleType enumeration; value derived from header
self.vehicleTypeString = None # set at same time has the enum value
self.firmwareVersion = ""
self.firmwareHash = ""
self.freeRAM = 0
self.hardwareType = "" # APM 1, APM 2, PX4, MPNG, etc What is VRBrain? BeagleBone, etc? Needs more testing
self.formats = {} # name -> Format
self.parameters = {} # token -> value
self.messages = {} # lineNum -> message
self.modeChanges = {} # lineNum -> (mode,value)
self.channels = {} # lineLabel -> {dataLabel:Channel}
self.filesizeKB = 0
self.durationSecs = 0
self.lineCount = 0
self.skippedLines = 0
self.backpatch_these_modechanges = []
if logfile:
self.read(logfile, format, ignoreBadlines)
def getCopterType(self):
'''returns quad/hex/octo/tradheli if this is a copter log'''
if self.vehicleType != VehicleType.Copter:
return None
motLabels = []
if "MOT" in self.formats: # not listed in PX4 log header for some reason?
motLabels = self.formats["MOT"].labels
if "GGain" in motLabels:
return "tradheli"
elif len(motLabels) == 4:
return "quad"
elif len(motLabels) == 6:
return "hex"
elif len(motLabels) == 8:
return "octo"
else:
return ""
def read(self, logfile, format="auto", ignoreBadlines=False):
'''returns on successful log read (including bad lines if ignoreBadlines==True), will throw an Exception otherwise'''
# TODO: dataflash log parsing code is pretty hacky, should re-write more methodically
self.filename = logfile
if self.filename == '<stdin>':
f = sys.stdin
else:
f = open(self.filename, 'r')
if format == 'bin':
head = '\xa3\x95\x80\x80'
elif format == 'log':
head = ""
elif format == 'auto':
if self.filename == '<stdin>':
# assuming TXT format
# raise ValueError("Invalid log format for stdin: {}".format(format))
head = ""
else:
head = f.read(4)
f.seek(0)
else:
raise ValueError("Unknown log format for {}: {}".format(self.filename, format))
if head == '\xa3\x95\x80\x80':
numBytes, lineNumber = self.read_binary(f, ignoreBadlines)
pass
else:
numBytes, lineNumber = self.read_text(f, ignoreBadlines)
# gather some general stats about the log
self.lineCount = lineNumber
self.filesizeKB = numBytes / 1024.0
# TODO: switch duration calculation to use TimeMS values rather than GPS timestemp
if "GPS" in self.channels:
# the GPS time label changed at some point, need to handle both
timeLabel = None
for i in 'TimeMS','TimeUS','Time':
if i in self.channels["GPS"]:
timeLabel = i
break
firstTimeGPS = int(self.channels["GPS"][timeLabel].listData[0][1])
lastTimeGPS = int(self.channels["GPS"][timeLabel].listData[-1][1])
if timeLabel == 'TimeUS':
firstTimeGPS /= 1000
lastTimeGPS /= 1000
self.durationSecs = (lastTimeGPS-firstTimeGPS) / 1000
# TODO: calculate logging rate based on timestamps
# ...
msg_vehicle_to_vehicle_map = {
"ArduCopter": VehicleType.Copter,
"APM:Copter": VehicleType.Copter,
"ArduPlane": VehicleType.Plane,
"ArduRover": VehicleType.Rover
}
# takes the vehicle type supplied via "MSG" and sets vehicleType from
# the VehicleType enumeration
def set_vehicleType_from_MSG_vehicle(self, MSG_vehicle):
ret = self.msg_vehicle_to_vehicle_map.get(MSG_vehicle, None)
if ret is None:
raise ValueError("Unknown vehicle type (%s)" % (MSG_vehicle))
self.vehicleType = ret
self.vehicleTypeString = VehicleTypeString[ret]
def handleModeChange(self, lineNumber, e):
if self.vehicleType == VehicleType.Copter:
try:
modes = {0:'STABILIZE',
1:'ACRO',
2:'ALT_HOLD',
3:'AUTO',
4:'GUIDED',
5:'LOITER',
6:'RTL',
7:'CIRCLE',
9:'LAND',
10:'OF_LOITER',
11:'DRIFT',
13:'SPORT',
14:'FLIP',
15:'AUTOTUNE',
16:'HYBRID',}
if hasattr(e, 'ThrCrs'):
self.modeChanges[lineNumber] = (modes[int(e.Mode)], e.ThrCrs)
else:
# assume it has ModeNum:
self.modeChanges[lineNumber] = (modes[int(e.Mode)], e.ModeNum)
except:
if hasattr(e, 'ThrCrs'):
self.modeChanges[lineNumber] = (e.Mode, e.ThrCrs)
else:
# assume it has ModeNum:
self.modeChanges[lineNumber] = (e.Mode, e.ModeNum)
elif self.vehicleType in [VehicleType.Plane, VehicleType.Copter, VehicleType.Rover]:
self.modeChanges[lineNumber] = (e.Mode, e.ModeNum)
else:
# if you've gotten to here the chances are we don't
# know what vehicle you're flying...
raise Exception("Unknown log type for MODE line vehicletype=({}) line=({})".format(self.vehicleTypeString, repr(e)))
def backPatchModeChanges(self):
for (lineNumber, e) in self.backpatch_these_modechanges:
self.handleModeChange(lineNumber, e)
def process(self, lineNumber, e):
if e.NAME == 'FMT':
cls = e.to_class()
if cls is not None: # FMT messages can be broken ...
if hasattr(e, 'type') and e.type not in self._formats: # binary log specific
self._formats[e.type] = cls
if cls.NAME not in self.formats:
self.formats[cls.NAME] = cls
elif e.NAME == "PARM":
self.parameters[e.Name] = e.Value
elif e.NAME == "MSG":
if not self.vehicleType:
tokens = e.Message.split(' ')
self.set_vehicleType_from_MSG_vehicle(tokens[0]);
self.backPatchModeChanges()
self.firmwareVersion = tokens[1]
if len(tokens) == 3:
self.firmwareHash = tokens[2][1:-1]
else:
self.messages[lineNumber] = e.Message
elif e.NAME == "MODE":
if self.vehicleType is None:
self.backpatch_these_modechanges.append( (lineNumber, e) )
else:
self.handleModeChange(lineNumber, e)
# anything else must be the log data
else:
groupName = e.NAME
# first time seeing this type of log line, create the channel storage
if not groupName in self.channels:
self.channels[groupName] = {}
for label in e.labels:
self.channels[groupName][label] = Channel()
# store each token in its relevant channel
for label in e.labels:
value = getattr(e, label)
channel = self.channels[groupName][label]
channel.dictData[lineNumber] = value
channel.listData.append((lineNumber, value))
def read_text(self, f, ignoreBadlines):
self.formats = {'FMT':Format}
lineNumber = 0
numBytes = 0
knownHardwareTypes = ["APM", "PX4", "MPNG"]
for line in f:
lineNumber = lineNumber + 1
numBytes += len(line) + 1
try:
#print "Reading line: %d" % lineNumber
line = line.strip('\n\r')
tokens = line.split(', ')
# first handle the log header lines
if line == " Ready to drive." or line == " Ready to FLY.":
continue
if line == "----------------------------------------": # present in pre-3.0 logs
raise Exception("Log file seems to be in the older format (prior to self-describing logs), which isn't supported")
if len(tokens) == 1:
tokens2 = line.split(' ')
if line == "":
pass
elif len(tokens2) == 1 and tokens2[0].isdigit(): # log index
pass
elif len(tokens2) == 3 and tokens2[0] == "Free" and tokens2[1] == "RAM:":
self.freeRAM = int(tokens2[2])
elif tokens2[0] in knownHardwareTypes:
self.hardwareType = line # not sure if we can parse this more usefully, for now only need to report it back verbatim
elif (len(tokens2) == 2 or len(tokens2) == 3) and tokens2[1][0].lower() == "v": # e.g. ArduCopter V3.1 (5c6503e2)
self.set_vehicleType_from_MSG_vehicle(tokens2[0])
self.firmwareVersion = tokens2[1]
if len(tokens2) == 3:
self.firmwareHash = tokens2[2][1:-1]
else:
errorMsg = "Error parsing line %d of log file: %s" % (lineNumber, self.filename)
if ignoreBadlines:
print(errorMsg + " (skipping line)", file=sys.stderr)
self.skippedLines += 1
else:
raise Exception("")
else:
if not tokens[0] in self.formats:
raise ValueError("Unknown Format {}".format(tokens[0]))
e = self.formats[tokens[0]](*tokens[1:])
self.process(lineNumber, e)
except Exception as e:
print("BAD LINE: " + line, file=sys.stderr)
if not ignoreBadlines:
raise Exception("Error parsing line %d of log file %s - %s" % (lineNumber,self.filename,e.args[0]))
return (numBytes,lineNumber)
def read_binary(self, f, ignoreBadlines):
lineNumber = 0
numBytes = 0
for e in self._read_binary(f, ignoreBadlines):
lineNumber += 1
if e is None:
continue
numBytes += e.SIZE
# print(e)
self.process(lineNumber, e)
return (numBytes,lineNumber)
def _read_binary(self, f, ignoreBadlines):
self._formats = {128:BinaryFormat}
data = bytearray(f.read())
offset = 0
while len(data) > offset + ctypes.sizeof(logheader):
h = logheader.from_buffer(data, offset)
if not (h.head1 == 0xa3 and h.head2 == 0x95):
if ignoreBadlines == False:
raise ValueError(h)
else:
if h.head1 == 0xff and h.head2 == 0xff and h.msgid == 0xff:
print("Assuming EOF due to dataflash block tail filled with \\xff... (offset={off})".format(off=offset), file=sys.stderr)
break
offset += 1
continue
if h.msgid in self._formats:
typ = self._formats[h.msgid]
if len(data) <= offset + typ.SIZE:
break
try:
e = typ.from_buffer(data, offset)
except:
print("data:{} offset:{} size:{} sizeof:{} sum:{}".format(len(data),offset,typ.SIZE,ctypes.sizeof(typ),offset+typ.SIZE))
raise
offset += typ.SIZE
else:
raise ValueError(str(h) + "unknown type")
yield e
| gpl-3.0 | -1,630,936,543,845,855,700 | 41.925072 | 212 | 0.557033 | false |
DazWorrall/zulip | zilencer/management/commands/profile_request.py | 117 | 1632 | from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import get_user_profile_by_email, UserMessage
from zerver.views.old_messages import get_old_messages_backend
import cProfile
import logging
from zerver.middleware import LogRequests
request_logger = LogRequests()
class MockSession(object):
def __init__(self):
self.modified = False
class MockRequest(object):
def __init__(self, email):
self.user = get_user_profile_by_email(email)
self.path = '/'
self.method = "POST"
self.META = {"REMOTE_ADDR": "127.0.0.1"}
self.REQUEST = {"anchor": UserMessage.objects.filter(user_profile=self.user).order_by("-message")[200].message_id,
"num_before": 1200,
"num_after": 200}
self.GET = {}
self.session = MockSession()
def get_full_path(self):
return self.path
def profile_request(request):
request_logger.process_request(request)
prof = cProfile.Profile()
prof.enable()
ret = get_old_messages_backend(request, request.user,
apply_markdown=True)
prof.disable()
prof.dump_stats("/tmp/profile.data")
request_logger.process_response(request, ret)
logging.info("Profiling data written to /tmp/profile.data")
return ret
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--email', action='store'),
)
def handle(self, *args, **options):
profile_request(MockRequest(options["email"]))
| apache-2.0 | 7,563,429,974,418,282,000 | 31.64 | 122 | 0.647059 | false |
hellotomfan/v8-coroutine | build/gyp/test/library/gyptest-shared-obj-install-path.py | 289 | 1180 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that .so files that are order only dependencies are specified by
their install location rather than by their alias.
"""
# Python 2.5 needs this for the with statement.
from __future__ import with_statement
import os
import TestGyp
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('shared_dependency.gyp',
chdir='src')
test.relocate('src', 'relocate/src')
test.build('shared_dependency.gyp', test.ALL, chdir='relocate/src')
if test.format=='android':
makefile_path = 'relocate/src/GypAndroid.mk'
else:
makefile_path = 'relocate/src/Makefile'
with open(makefile_path) as makefile:
make_contents = makefile.read()
# If we remove the code to generate lib1, Make should still be able
# to build lib2 since lib1.so already exists.
make_contents = make_contents.replace('include lib1.target.mk', '')
with open(makefile_path, 'w') as makefile:
makefile.write(make_contents)
test.build('shared_dependency.gyp', test.ALL, chdir='relocate/src')
test.pass_test()
| gpl-2.0 | 5,909,126,666,171,539,000 | 27.095238 | 73 | 0.731356 | false |