text
stringlengths 4
1.02M
| meta
dict |
---|---|
import os
from alembic import command as alembic_cmd
from alembic import config as alembic_cfg
from alembic import util as alembic_u
from oslo_config import cfg
from sahara.i18n import _
CONF = cfg.CONF
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_cmd, cmd)(config, *args, **kwargs)
except alembic_u.CommandError as e:
alembic_u.err(str(e))
def do_check_migration(config, _cmd):
do_alembic_command(config, 'branches')
def do_upgrade_downgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
revision = CONF.command.revision
if CONF.command.delta:
sign = '+' if CONF.command.name == 'upgrade' else '-'
revision = sign + str(CONF.command.delta)
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
parser = subparsers.add_parser('upgrade')
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade_downgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
command_opt = cfg.SubCommandOpt('command',
title='Command',
help='Available commands',
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = alembic_cfg.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini')
)
config.set_main_option('script_location',
'sahara.db.migration:alembic_migrations')
# attach the Sahara conf to the Alembic conf
config.sahara_config = CONF
CONF(project='sahara')
CONF.command.func(config, CONF.command.name)
| {
"content_hash": "2fde8393405e855f3ebfd9032462692a",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 76,
"avg_line_length": 30.78723404255319,
"alnum_prop": 0.6389080856945404,
"repo_name": "shakamunyi/sahara",
"id": "79d25a16ba268789a0f91b031998f9bac6573953",
"size": "3477",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sahara/db/migration/cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "36849"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "4947252"
},
{
"name": "Shell",
"bytes": "100611"
}
],
"symlink_target": ""
} |
import kivy
kivy.require('1.9.0')
from kivy.clock import Clock
from kivy.uix.image import Image
from kivy.properties import BooleanProperty
from ammo import Shot
class Shooter(Image):
reloaded = True
alife = False
def on_touch_down(self, touch):
if self.parent.collide_point(*touch.pos):
self.center_x = touch.x
touch.ud['move'] = True
elif self.enemy_area.collide_point(*touch.pos):
self.shoot(touch.x, touch.y)
touch.ud['shoot'] = True
def on_touch_move(self, touch):
if self.parent.collide_point(*touch.pos):
self.center_x = touch.x
elif self.enemy_area.collide_point(*touch.pos):
self.shoot(touch.x, touch.y)
def on_touch_up(self, touch):
if 'shoot' in touch.ud and touch.ud['shoot']:
self.reloaded = True
def start(self, instance, value):
self.alife=True
def shoot(self, fx, fy):
if self.reloaded and self.alife:
self.reloaded = False
Clock.schedule_once(self.reload_gun, .5)
shot = Shot()
shot.center = (self.center_x, self.top)
self.invasion.add_widget(shot)
(fx, fy) = self.project(self.center_x, self.top, fx, fy)
shot.shoot(fx, fy, self.invasion.fleet)
def reload_gun(self, dt):
self.reloaded = True
def collide_ammo(self, ammo):
if self.collide_widget(ammo) and self.alife:
self.alife = False
self.color = (0,0,0,0)
self.invasion.end_game("Game Over")
return True
return False
def project(self, ix, iy, fx, fy):
(w, h) = self.invasion.size
if ix == fx: return (ix, h)
m = (fy-iy) / (fx-ix)
b = iy - m*ix
x = (h-b)/m
if x < 0: return (0, b)
elif x > w: return (w, m*w+b)
return (x, h)
| {
"content_hash": "63d2e239ed50ec757a7d543bca21a276",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 68,
"avg_line_length": 29.338461538461537,
"alnum_prop": 0.5568956476140535,
"repo_name": "pimier15/PyGUI",
"id": "26840250dac7dfd2f275de8c8dcb613511f6c8a7",
"size": "1931",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Kivy/Kivy/Bk_Interractive/sample/Chapter_05_code/shooter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "636598"
}
],
"symlink_target": ""
} |
from oslo_utils import uuidutils as uid
from rally.benchmark.scenarios import base
from rally.common import log as logging
from rally.plugins.openstack.wrappers import network as network_wrapper
LOG = logging.getLogger(__name__)
class NeutronScenario(base.Scenario):
"""Base class for Neutron scenarios with basic atomic actions."""
RESOURCE_NAME_PREFIX = "rally_net_"
RESOURCE_NAME_LENGTH = 16
SUBNET_IP_VERSION = 4
@base.atomic_action_timer("neutron.create_network")
def _create_network(self, network_create_args):
"""Create neutron network.
:param network_create_args: dict, POST /v2.0/networks request options
:returns: neutron network dict
"""
network_create_args.setdefault("name", self._generate_random_name())
return self.clients("neutron").create_network(
{"network": network_create_args})
@base.atomic_action_timer("neutron.list_networks")
def _list_networks(self):
"""Return user networks list."""
return self.clients("neutron").list_networks()["networks"]
@base.atomic_action_timer("neutron.update_network")
def _update_network(self, network, network_update_args):
"""Update the network name and admin state.
This atomic function updates network name by
appending the existing name and admin state with network_update_args.
:param network: Network object
:param network_update_args: dict, POST /v2.0/networks update options
:returns: updated neutron network dict
"""
suffix = network_update_args.get(
"name", self._generate_random_name("_"))
admin_state_up = network_update_args.get("admin_state_up", True)
body = {
"network": {
"name": network["network"]["name"] + suffix,
"admin_state_up": admin_state_up
}
}
return self.clients("neutron").update_network(
network["network"]["id"], body)
@base.atomic_action_timer("neutron.delete_network")
def _delete_network(self, network):
"""Delete neutron network.
:param network: Network object
"""
self.clients("neutron").delete_network(network["id"])
@base.atomic_action_timer("neutron.create_subnet")
def _create_subnet(self, network, subnet_create_args, start_cidr=None):
"""Create neutron subnet.
:param network: neutron network dict
:param subnet_create_args: POST /v2.0/subnets request options
:returns: neutron subnet dict
"""
network_id = network["network"]["id"]
if not subnet_create_args.get("cidr"):
start_cidr = start_cidr or "10.2.0.0/24"
subnet_create_args["cidr"] = (
network_wrapper.generate_cidr(start_cidr=start_cidr))
subnet_create_args["network_id"] = network_id
subnet_create_args.setdefault(
"name", self._generate_random_name("rally_subnet_"))
subnet_create_args.setdefault("ip_version", self.SUBNET_IP_VERSION)
return self.clients("neutron").create_subnet(
{"subnet": subnet_create_args})
@base.atomic_action_timer("neutron.list_subnets")
def _list_subnets(self):
"""Returns user subnetworks list."""
return self.clients("neutron").list_subnets()["subnets"]
@base.atomic_action_timer("neutron.update_subnet")
def _update_subnet(self, subnet, subnet_update_args):
"""Update the neutron subnet name and DHCP status.
This atomic function updates subnet name by
appending the existing name and DHCP status with subnet_update_args.
:param subnet: Subnet object
:param subnet_update_args: dict, PUT /v2.0/subnets update options
:returns: updated neutron subnet dict
"""
suffix = subnet_update_args.get(
"name", self._generate_random_name("_"))
enable_dhcp = subnet_update_args.get("enable_dhcp", True)
body = {
"subnet": {
"name": subnet["subnet"]["name"] + suffix,
"enable_dhcp": enable_dhcp
}
}
return self.clients("neutron").update_subnet(
subnet["subnet"]["id"], body)
@base.atomic_action_timer("neutron.delete_subnet")
def _delete_subnet(self, subnet):
"""Delete neutron subnet
:param subnet: Subnet object
"""
self.clients("neutron").delete_subnet(subnet["subnet"]["id"])
@base.atomic_action_timer("neutron.create_router")
def _create_router(self, router_create_args, external_gw=False):
"""Create neutron router.
:param router_create_args: POST /v2.0/routers request options
:returns: neutron router dict
"""
router_create_args.setdefault(
"name", self._generate_random_name("rally_router_"))
if external_gw:
for network in self._list_networks():
if network.get("router:external"):
external_network = network
gw_info = {"network_id": external_network["id"],
"enable_snat": True}
router_create_args.setdefault("external_gateway_info",
gw_info)
return self.clients("neutron").create_router(
{"router": router_create_args})
@base.atomic_action_timer("neutron.list_routers")
def _list_routers(self):
"""Returns user routers list."""
return self.clients("neutron").list_routers()["routers"]
@base.atomic_action_timer("neutron.delete_router")
def _delete_router(self, router):
"""Delete neutron router
:param router: Router object
"""
self.clients("neutron").delete_router(router["router"]["id"])
@base.atomic_action_timer("neutron.update_router")
def _update_router(self, router, router_update_args):
"""Update the neutron router name and admin state.
This atomic function updates router name by
appending the existing name and admin state with router_update_args.
:param router: dict, neutron router
:param router_update_args: dict, PUT /v2.0/routers update options
:returns: updated neutron router dict
"""
suffix = router_update_args.get(
"name", self._generate_random_name("_"))
admin_state = router_update_args.get("admin_state_up", True)
body = {
"router": {
"name": router["router"]["name"] + suffix,
"admin_state_up": admin_state
}
}
return self.clients("neutron").update_router(
router["router"]["id"], body)
@base.atomic_action_timer("neutron.create_port")
def _create_port(self, network, port_create_args):
"""Create neutron port.
:param network: neutron network dict
:param port_create_args: POST /v2.0/ports request options
:returns: neutron port dict
"""
port_create_args["network_id"] = network["network"]["id"]
port_create_args.setdefault(
"name", self._generate_random_name("rally_port_"))
return self.clients("neutron").create_port({"port": port_create_args})
@base.atomic_action_timer("neutron.list_ports")
def _list_ports(self):
"""Return user ports list."""
return self.clients("neutron").list_ports()["ports"]
@base.atomic_action_timer("neutron.update_port")
def _update_port(self, port, port_update_args):
"""Update the neutron port name, admin state, device id and owner.
This atomic function updates port name by
appending the existing name, admin state, device id and
device owner with port_update_args.
:param port: dict, neutron port
:param port_update_args: dict, PUT /v2.0/ports update options
:returns: updated neutron port dict
"""
suffix = port_update_args.get(
"name", self._generate_random_name("_"))
admin_state = port_update_args.get("admin_state_up", True)
device_owner = port_update_args.get("device_owner", "compute:nova")
device_id = port_update_args.get("device_id", uid.generate_uuid())
body = {
"port": {
"name": port["port"]["name"] + suffix,
"admin_state_up": admin_state,
"device_id": device_id,
"device_owner": device_owner
}
}
return self.clients("neutron").update_port(port["port"]["id"], body)
@base.atomic_action_timer("neutron.delete_port")
def _delete_port(self, port):
"""Delete neutron port.
:param port: Port object
"""
self.clients("neutron").delete_port(port["port"]["id"])
def _create_network_and_subnets(self,
network_create_args=None,
subnet_create_args=None,
subnets_per_network=1,
subnet_cidr_start="1.0.0.0/24"):
"""Create network and subnets.
:parm network_create_args: dict, POST /v2.0/networks request options
:parm subnet_create_args: dict, POST /v2.0/subnets request options
:parm subnets_per_network: int, number of subnets for one network
:parm subnet_cidr_start: str, start value for subnets CIDR
:returns: tuple of result network and subnets list
"""
subnets = []
network = self._create_network(network_create_args or {})
for i in range(subnets_per_network):
subnet = self._create_subnet(network, subnet_create_args or {},
subnet_cidr_start)
subnets.append(subnet)
return network, subnets
@base.atomic_action_timer("neutron.add_interface_router")
def _add_interface_router(self, subnet, router):
"""Connect subnet to router.
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.clients("neutron").add_interface_router(
router["id"], {"subnet_id": subnet["id"]})
@base.atomic_action_timer("neutron.remove_interface_router")
def _remove_interface_router(self, subnet, router):
"""Remove subnet from router
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.clients("neutron").remove_interface_router(
router["id"], {"subnet_id": subnet["id"]})
| {
"content_hash": "efa19fb8c1b5ed09545700ea7dce7ded",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 78,
"avg_line_length": 38.82846715328467,
"alnum_prop": 0.5976125575712004,
"repo_name": "vponomaryov/rally",
"id": "0fe7fab7aa45ef8fc3c5881c66b2f32161a86189",
"size": "11266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rally/plugins/openstack/scenarios/neutron/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2367891"
},
{
"name": "Shell",
"bytes": "35878"
}
],
"symlink_target": ""
} |
"""Handles all requests relating to volumes."""
import collections
import datetime
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from cinder.api import common
from cinder import context
from cinder.db import base
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import keymgr
from cinder import objects
from cinder.objects import base as objects_base
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume.flows.api import manage_existing
from cinder.volume import qos_specs
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
allow_force_upload = cfg.BoolOpt('enable_force_upload',
default=False,
help='Enables the Force option on '
'upload_to_image. This enables '
'running upload_volume on in-use '
'volumes for backends that support it.')
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host '
'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
default=True,
help='Ensure that the new volumes are the '
'same AZ as snapshot or source volume')
az_cache_time_opt = cfg.IntOpt('az_cache_duration',
default=3600,
help='Cache volume availability zones in '
'memory for the provided duration in '
'seconds')
CONF = cfg.CONF
CONF.register_opt(allow_force_upload)
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.register_opt(az_cache_time_opt)
CONF.import_opt('glance_core_properties', 'cinder.image.glance')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
if isinstance(target_obj, objects_base.CinderObject):
# Turn object into dict so target.update can work
target.update(target_obj.obj_to_primitive() or {})
else:
target.update(target_obj or {})
_action = 'volume:%s' % action
cinder.policy.enforce(context, _action, target)
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
self.image_service = (image_service or
glance.get_default_image_service())
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zones = []
self.availability_zones_last_fetched = None
self.key_manager = keymgr.API()
super(API, self).__init__(db_driver)
def list_availability_zones(self, enable_cache=False):
"""Describe the known availability zones
:retval tuple of dicts, each with a 'name' and 'available' key
"""
refresh_cache = False
if enable_cache:
if self.availability_zones_last_fetched is None:
refresh_cache = True
else:
cache_age = timeutils.delta_seconds(
self.availability_zones_last_fetched,
timeutils.utcnow())
if cache_age >= CONF.az_cache_duration:
refresh_cache = True
if refresh_cache or not enable_cache:
topic = CONF.volume_topic
ctxt = context.get_admin_context()
services = objects.ServiceList.get_all_by_topic(ctxt, topic)
az_data = [(s.availability_zone, s.disabled)
for s in services]
disabled_map = {}
for (az_name, disabled) in az_data:
tracked_disabled = disabled_map.get(az_name, True)
disabled_map[az_name] = tracked_disabled and disabled
azs = [{'name': name, 'available': not disabled}
for (name, disabled) in disabled_map.items()]
if refresh_cache:
now = timeutils.utcnow()
self.availability_zones = azs
self.availability_zones_last_fetched = now
LOG.debug("Availability zone cache updated, next update will"
" occur around %s.", now + datetime.timedelta(
seconds=CONF.az_cache_duration))
else:
azs = self.availability_zones
LOG.info(_LI("Availability Zones retrieved successfully."))
return tuple(azs)
def _retype_is_possible(self, context,
first_type_id, second_type_id,
first_type=None, second_type=None):
safe = False
services = objects.ServiceList.get_all_by_topic(context,
'cinder-volume',
disabled=True)
if len(services.objects) == 1:
safe = True
else:
type_a = first_type or volume_types.get_volume_type(
context,
first_type_id)
type_b = second_type or volume_types.get_volume_type(
context,
second_type_id)
if(volume_utils.matching_backend_name(type_a['extra_specs'],
type_b['extra_specs'])):
safe = True
return safe
def _is_volume_migrating(self, volume):
# The migration status 'none' means no migration has ever been done
# before. The migration status 'error' means the previous migration
# failed. The migration status 'success' means the previous migration
# succeeded. The migration status 'deleting' means the source volume
# fails to delete after a migration.
# All of the statuses above means the volume is not in the process
# of a migration.
return volume['migration_status'] not in (None, 'deleting',
'error', 'success')
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None, source_volume=None,
scheduler_hints=None,
source_replica=None, consistencygroup=None,
cgsnapshot=None, multiattach=False, source_cg=None):
check_policy(context, 'create')
# NOTE(jdg): we can have a create without size if we're
# doing a create from snap or volume. Currently
# the taskflow api will handle this and pull in the
# size from the source.
# NOTE(jdg): cinderclient sends in a string representation
# of the size value. BUT there is a possibility that somebody
# could call the API directly so the is_int_like check
# handles both cases (string representation of true float or int).
if size and (not utils.is_int_like(size) or int(size) <= 0):
msg = _('Invalid volume size provided for create request: %s '
'(size argument must be an integer (or string '
'representation of an integer) and greater '
'than zero).') % size
raise exception.InvalidInput(reason=msg)
if consistencygroup and (not cgsnapshot and not source_cg):
if not volume_type:
msg = _("volume_type must be provided when creating "
"a volume in a consistency group.")
raise exception.InvalidInput(reason=msg)
cg_voltypeids = consistencygroup.get('volume_type_id')
if volume_type.get('id') not in cg_voltypeids:
msg = _("Invalid volume_type provided: %s (requested "
"type must be supported by this consistency "
"group).") % volume_type
raise exception.InvalidInput(reason=msg)
if source_volume and volume_type:
if volume_type['id'] != source_volume['volume_type_id']:
if not self._retype_is_possible(
context,
volume_type['id'],
source_volume['volume_type_id'],
volume_type):
msg = _("Invalid volume_type provided: %s (requested type "
"is not compatible; either match source volume, "
"or omit type argument).") % volume_type['id']
raise exception.InvalidInput(reason=msg)
# When cloning replica (for testing), volume type must be omitted
if source_replica and volume_type:
msg = _("No volume_type should be provided when creating test "
"replica.")
raise exception.InvalidInput(reason=msg)
if snapshot and volume_type:
if volume_type['id'] != snapshot.volume_type_id:
if not self._retype_is_possible(context,
volume_type['id'],
snapshot.volume_type_id,
volume_type):
msg = _("Invalid volume_type provided: %s (requested "
"type is not compatible; recommend omitting "
"the type argument).") % volume_type['id']
raise exception.InvalidInput(reason=msg)
# Determine the valid availability zones that the volume could be
# created in (a task in the flow will/can use this information to
# ensure that the availability zone requested is valid).
raw_zones = self.list_availability_zones(enable_cache=True)
availability_zones = set([az['name'] for az in raw_zones])
if CONF.storage_availability_zone:
availability_zones.add(CONF.storage_availability_zone)
create_what = {
'context': context,
'raw_size': size,
'name': name,
'description': description,
'snapshot': snapshot,
'image_id': image_id,
'raw_volume_type': volume_type,
'metadata': metadata,
'raw_availability_zone': availability_zone,
'source_volume': source_volume,
'scheduler_hints': scheduler_hints,
'key_manager': self.key_manager,
'source_replica': source_replica,
'optional_args': {'is_quota_committed': False},
'consistencygroup': consistencygroup,
'cgsnapshot': cgsnapshot,
'multiattach': multiattach,
}
try:
sched_rpcapi = (self.scheduler_rpcapi if (not cgsnapshot and
not source_cg) else None)
volume_rpcapi = (self.volume_rpcapi if (not cgsnapshot and
not source_cg) else None)
flow_engine = create_volume.get_flow(self.db,
self.image_service,
availability_zones,
create_what,
sched_rpcapi,
volume_rpcapi)
except Exception:
msg = _('Failed to create api volume flow.')
LOG.exception(msg)
raise exception.CinderException(msg)
# Attaching this listener will capture all of the notifications that
# taskflow sends out and redirect them to a more useful log for
# cinders debugging (or error reporting) usage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vref = flow_engine.storage.fetch('volume')
LOG.info(_LI("Volume created successfully."), resource=vref)
return vref
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
if context.is_admin and context.project_id != volume['project_id']:
project_id = volume['project_id']
else:
project_id = context.project_id
volume_id = volume['id']
if not volume['host']:
volume_utils.notify_about_volume_usage(context,
volume, "delete.start")
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume['volume_type_id'])
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update quota while "
"deleting volume."))
self.db.volume_destroy(context.elevated(), volume_id)
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
volume_utils.notify_about_volume_usage(context,
volume, "delete.end")
LOG.info(_LI("Delete volume request issued successfully."),
resource={'type': 'volume',
'id': volume_id})
return
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
LOG.info(_LI('Unable to delete volume: %s, '
'volume is attached.'), volume['id'])
raise exception.VolumeAttached(volume_id=volume_id)
if not force and volume['status'] not in ["available", "error",
"error_restoring",
"error_extending"]:
msg = _("Volume status must be available or error, "
"but current status is: %s.") % volume['status']
LOG.info(_LI('Unable to delete volume: %(vol_id)s, '
'volume must be available or '
'error, but is %(vol_status)s.'),
{'vol_id': volume['id'],
'vol_status': volume['status']})
raise exception.InvalidVolume(reason=msg)
if self._is_volume_migrating(volume):
# Volume is migrating, wait until done
LOG.info(_LI('Unable to delete volume: %s, '
'volume is currently migrating.'), volume['id'])
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
if volume['consistencygroup_id'] is not None:
msg = _("Volume cannot be deleted while in a consistency group.")
LOG.info(_LI('Unable to delete volume: %s, '
'volume is currently part of a '
'consistency group.'), volume['id'])
raise exception.InvalidVolume(reason=msg)
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume_id)
if len(snapshots):
LOG.info(_LI('Unable to delete volume: %s, '
'volume currently has snapshots.'), volume['id'])
msg = _("Volume still has %d dependent "
"snapshots.") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
cache = image_cache.ImageVolumeCache(self.db, self)
entry = cache.get_by_image_volume(context, volume_id)
if entry:
cache.evict(context, entry)
# If the volume is encrypted, delete its encryption key from the key
# manager. This operation makes volume deletion an irreversible process
# because the volume cannot be decrypted without its key.
encryption_key_id = volume.get('encryption_key_id', None)
if encryption_key_id is not None:
self.key_manager.delete_key(context, encryption_key_id)
now = timeutils.utcnow()
vref = self.db.volume_update(context,
volume_id,
{'status': 'deleting',
'terminated_at': now})
self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
LOG.info(_LI("Delete volume request issued successfully."),
resource=vref)
@wrap_check_policy
def update(self, context, volume, fields):
if volume['status'] == 'maintenance':
LOG.info(_LI("Unable to update volume, "
"because it is in maintenance."), resource=volume)
msg = _("The volume cannot be updated during maintenance.")
raise exception.InvalidVolume(reason=msg)
vref = self.db.volume_update(context, volume['id'], fields)
LOG.info(_LI("Volume updated successfully."), resource=vref)
def get(self, context, volume_id, viewable_admin_meta=False):
rv = self.db.volume_get(context, volume_id)
volume = dict(rv)
if viewable_admin_meta:
ctxt = context.elevated()
admin_metadata = self.db.volume_admin_metadata_get(ctxt,
volume_id)
volume['volume_admin_metadata'] = admin_metadata
try:
check_policy(context, 'get', volume)
except exception.PolicyNotAuthorized:
# raise VolumeNotFound instead to make sure Cinder behaves
# as it used to
raise exception.VolumeNotFound(volume_id=volume_id)
LOG.info(_LI("Volume info retrieved successfully."), resource=rv)
return volume
def _get_all_tenants_value(self, filters):
"""Returns a Boolean for the value of filters['all_tenants'].
False is returned if 'all_tenants' is not in the filters dictionary.
An InvalidInput exception is thrown for invalid values.
"""
b = False
if 'all_tenants' in filters:
val = six.text_type(filters['all_tenants']).lower()
if val in ['true', '1']:
b = True
elif val in ['false', '0']:
b = False
else:
msg = _('all_tenants param must be 0 or 1')
raise exception.InvalidInput(reason=msg)
return b
def get_all(self, context, marker=None, limit=None, sort_keys=None,
sort_dirs=None, filters=None, viewable_admin_meta=False,
offset=None):
check_policy(context, 'get_all')
if filters is None:
filters = {}
allTenants = self._get_all_tenants_value(filters)
try:
if limit is not None:
limit = int(limit)
if limit < 0:
msg = _('limit param must be positive')
raise exception.InvalidInput(reason=msg)
except ValueError:
msg = _('limit param must be an integer')
raise exception.InvalidInput(reason=msg)
# Non-admin shouldn't see temporary target of a volume migration, add
# unique filter data to reflect that only volumes with a NULL
# 'migration_status' or a 'migration_status' that does not start with
# 'target:' should be returned (processed in db/sqlalchemy/api.py)
if not context.is_admin:
filters['no_migration_targets'] = True
if filters:
LOG.debug("Searching by: %s.", six.text_type(filters))
if context.is_admin and allTenants:
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
volumes = self.db.volume_get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
offset=offset)
else:
if viewable_admin_meta:
context = context.elevated()
volumes = self.db.volume_get_all_by_project(context,
context.project_id,
marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
offset=offset)
LOG.info(_LI("Get all volumes completed successfully."))
return volumes
def get_snapshot(self, context, snapshot_id):
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
# FIXME(jdg): The objects don't have the db name entries
# so build the resource tag manually for now.
LOG.info(_LI("Snapshot retrieved successfully."),
resource={'type': 'snapshot',
'id': snapshot.id})
return snapshot
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
vref = self.db.volume_get(context, volume_id)
LOG.info(_LI("Volume retrieved successfully."), resource=vref)
return dict(vref)
def get_all_snapshots(self, context, search_opts=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if context.is_admin and 'all_tenants' in search_opts:
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
snapshots = objects.SnapshotList.get_all(
context, search_opts, marker, limit, sort_keys, sort_dirs,
offset)
else:
snapshots = objects.SnapshotList.get_all_by_project(
context, context.project_id, search_opts, marker, limit,
sort_keys, sort_dirs, offset)
LOG.info(_LI("Get all snaphsots completed successfully."))
return snapshots
@wrap_check_policy
def reserve_volume(self, context, volume):
# NOTE(jdg): check for Race condition bug 1096983
# explicitly get updated ref and check
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'available':
self.update(context, volume, {"status": "attaching"})
elif volume['status'] == 'in-use':
if volume['multiattach']:
self.update(context, volume, {"status": "attaching"})
else:
msg = _("Volume must be multiattachable to reserve again.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
else:
msg = _("Volume status must be available to reserve.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Reserve volume completed successfully."),
resource=volume)
@wrap_check_policy
def unreserve_volume(self, context, volume):
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'attaching':
attaches = self.db.volume_attachment_get_used_by_volume_id(
context, volume['id'])
if attaches:
self.update(context, volume, {"status": "in-use"})
else:
self.update(context, volume, {"status": "available"})
LOG.info(_LI("Unreserve volume completed successfully."),
resource=volume)
@wrap_check_policy
def begin_detaching(self, context, volume):
# NOTE(vbala): The volume status might be 'detaching' already due to
# a previous begin_detaching call. Get updated volume status so that
# we fail such cases.
volume = self.db.volume_get(context, volume['id'])
# If we are in the middle of a volume migration, we don't want the user
# to see that the volume is 'detaching'. Having 'migration_status' set
# will have the same effect internally.
if self._is_volume_migrating(volume):
return
if (volume['status'] != 'in-use' or
volume['attach_status'] != 'attached'):
msg = (_("Unable to detach volume. Volume status must be 'in-use' "
"and attach_status must be 'attached' to detach. "
"Currently: status: '%(status)s', "
"attach_status: '%(attach_status)s.'") %
{'status': volume['status'],
'attach_status': volume['attach_status']})
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.update(context, volume, {"status": "detaching"})
LOG.info(_LI("Begin detaching volume completed successfully."),
resource=volume)
@wrap_check_policy
def roll_detaching(self, context, volume):
if volume['status'] == "detaching":
self.update(context, volume, {"status": "in-use"})
LOG.info(_LI("Roll detaching of volume completed successfully."),
resource=volume)
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name,
mountpoint, mode):
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to attach volume, '
'because it is in maintenance.'), resource=volume)
msg = _("The volume cannot be attached in maintenance mode.")
raise exception.InvalidVolume(reason=msg)
volume_metadata = self.get_volume_admin_metadata(context.elevated(),
volume)
if 'readonly' not in volume_metadata:
# NOTE(zhiyan): set a default value for read-only flag to metadata.
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': 'False'})
volume_metadata['readonly'] = 'False'
if volume_metadata['readonly'] == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume['id'])
attach_results = self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
host_name,
mountpoint,
mode)
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return attach_results
@wrap_check_policy
def detach(self, context, volume, attachment_id):
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to detach volume, '
'because it is in maintenance.'), resource=volume)
msg = _("The volume cannot be detached in maintenance mode.")
raise exception.InvalidVolume(reason=msg)
detach_results = self.volume_rpcapi.detach_volume(context, volume,
attachment_id)
LOG.info(_LI("Detach volume completed successfully."),
resource=volume)
return detach_results
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to initialize the connection for '
'volume, because it is in '
'maintenance.'), resource=volume)
msg = _("The volume connection cannot be initialized in "
"maintenance mode.")
raise exception.InvalidVolume(reason=msg)
init_results = self.volume_rpcapi.initialize_connection(context,
volume,
connector)
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return init_results
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
self.volume_rpcapi.terminate_connection(context,
volume,
connector,
force)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume)
self.unreserve_volume(context, volume)
@wrap_check_policy
def accept_transfer(self, context, volume, new_user, new_project):
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to accept transfer for volume, '
'because it is in maintenance.'), resource=volume)
msg = _("The volume cannot accept transfer in maintenance mode.")
raise exception.InvalidVolume(reason=msg)
results = self.volume_rpcapi.accept_transfer(context,
volume,
new_user,
new_project)
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume)
return results
def _create_snapshot(self, context,
volume, name, description,
force=False, metadata=None,
cgsnapshot_id=None):
snapshot = self.create_snapshot_in_db(
context, volume, name,
description, force, metadata, cgsnapshot_id)
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
return snapshot
def create_snapshot_in_db(self, context,
volume, name, description,
force, metadata,
cgsnapshot_id):
check_policy(context, 'create_snapshot', volume)
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to create the snapshot for volume, '
'because it is in maintenance.'), resource=volume)
msg = _("The snapshot cannot be created when the volume is in "
"maintenance mode.")
raise exception.InvalidVolume(reason=msg)
if self._is_volume_migrating(volume):
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating.")
raise exception.InvalidVolume(reason=msg)
if volume['status'].startswith('replica_'):
# Can't snapshot secondary replica
msg = _("Snapshot of secondary replica is not allowed.")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Volume %(vol_id)s status must be available, "
"but current status is: "
"%(vol_status)s.") % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
self._check_metadata_properties(metadata)
snapshot = None
try:
kwargs = {
'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id'],
'metadata': metadata or {}
}
snapshot = objects.Snapshot(context=context, **kwargs)
snapshot.create()
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
if hasattr(snapshot, 'id'):
snapshot.destroy()
finally:
QUOTAS.rollback(context, reservations)
return snapshot
def create_snapshots_in_db(self, context,
volume_list,
name, description,
force, cgsnapshot_id):
snapshot_list = []
for volume in volume_list:
self._create_snapshot_in_db_validate(context, volume, force)
reservations = self._create_snapshots_in_db_reserve(
context, volume_list)
options_list = []
for volume in volume_list:
options = self._create_snapshot_in_db_options(
context, volume, name, description, cgsnapshot_id)
options_list.append(options)
try:
for options in options_list:
snapshot = objects.Snapshot(context=context, **options)
snapshot.create()
snapshot_list.append(snapshot)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
for snap in snapshot_list:
snap.destroy()
finally:
QUOTAS.rollback(context, reservations)
return snapshot_list
def _create_snapshot_in_db_validate(self, context, volume, force):
check_policy(context, 'create_snapshot', volume)
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to create the snapshot for volume, '
'because it is in maintenance.'), resource=volume)
msg = _("The snapshot cannot be created when the volume is in "
"maintenance mode.")
raise exception.InvalidVolume(reason=msg)
if self._is_volume_migrating(volume):
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating.")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Snapshot cannot be created because volume %(vol_id)s "
"is not available, current volume status: "
"%(vol_status)s.") % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
def _create_snapshots_in_db_reserve(self, context, volume_list):
reserve_opts_list = []
total_reserve_opts = {}
try:
for volume in volume_list:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1,
'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reserve_opts_list.append(reserve_opts)
for reserve_opts in reserve_opts_list:
for (key, value) in reserve_opts.items():
if key not in total_reserve_opts.keys():
total_reserve_opts[key] = value
else:
total_reserve_opts[key] = \
total_reserve_opts[key] + value
reservations = QUOTAS.reserve(context, **total_reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
volume_utils.process_reserve_over_quota(context, overs, usages,
quotas, volume['size'])
return reservations
def _create_snapshot_in_db_options(self, context, volume,
name, description,
cgsnapshot_id):
options = {'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id']}
return options
def create_snapshot(self, context,
volume, name, description,
metadata=None, cgsnapshot_id=None):
result = self._create_snapshot(context, volume, name, description,
False, metadata, cgsnapshot_id)
LOG.info(_LI("Snapshot create request issued successfully."),
resource=result)
return result
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
result = self._create_snapshot(context, volume, name, description,
True, metadata)
LOG.info(_LI("Snapshot force create request issued successfully."),
resource=result)
return result
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False,
unmanage_only=False):
if not force and snapshot.status not in ["available", "error"]:
LOG.error(_LE('Unable to delete snapshot: %(snap_id)s, '
'due to invalid status. '
'Status must be available or '
'error, not %(snap_status)s.'),
{'snap_id': snapshot.id,
'snap_status': snapshot.status})
msg = _("Volume Snapshot status must be available or error.")
raise exception.InvalidSnapshot(reason=msg)
cgsnapshot_id = snapshot.cgsnapshot_id
if cgsnapshot_id:
msg = _('Unable to delete snapshot %s because it is part of a '
'consistency group.') % snapshot.id
LOG.error(msg)
raise exception.InvalidSnapshot(reason=msg)
snapshot_obj = self.get_snapshot(context, snapshot.id)
snapshot_obj.status = 'deleting'
snapshot_obj.save()
volume = self.db.volume_get(context, snapshot_obj.volume_id)
self.volume_rpcapi.delete_snapshot(context, snapshot_obj,
volume['host'],
unmanage_only=unmanage_only)
LOG.info(_LI("Snapshot delete request issued successfully."),
resource=snapshot)
@wrap_check_policy
def update_snapshot(self, context, snapshot, fields):
snapshot.update(fields)
snapshot.save()
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume metadata completed successfully."),
resource=volume)
return dict(rv)
@wrap_check_policy
def delete_volume_metadata(self, context, volume,
key, meta_type=common.METADATA_TYPES.user):
"""Delete the given metadata item from a volume."""
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to delete the volume metadata, '
'because it is in maintenance.'), resource=volume)
msg = _("The volume metadata cannot be deleted when the volume "
"is in maintenance mode.")
raise exception.InvalidVolume(reason=msg)
self.db.volume_metadata_delete(context, volume['id'], key, meta_type)
LOG.info(_LI("Delete volume metadata completed successfully."),
resource=volume)
def _check_metadata_properties(self, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.items():
if len(k) == 0:
msg = _("Metadata property key blank.")
LOG.warning(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters.")
LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters.")
LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
@wrap_check_policy
def update_volume_metadata(self, context, volume,
metadata, delete=False,
meta_type=common.METADATA_TYPES.user):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to update the metadata for volume, '
'because it is in maintenance.'), resource=volume)
msg = _("The volume metadata cannot be updated when the volume "
"is in maintenance mode.")
raise exception.InvalidVolume(reason=msg)
if delete:
_metadata = metadata
else:
if meta_type == common.METADATA_TYPES.user:
orig_meta = self.get_volume_metadata(context, volume)
elif meta_type == common.METADATA_TYPES.image:
try:
orig_meta = self.get_volume_image_metadata(context,
volume)
except exception.GlanceMetadataNotFound:
orig_meta = {}
else:
raise exception.InvalidMetadataType(metadata_type=meta_type,
id=volume['id'])
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.volume_metadata_update(context, volume['id'],
_metadata,
delete,
meta_type)
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume metadata completed successfully."),
resource=volume)
return db_meta
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
LOG.info(_LI("Get volume metadata key completed successfully."),
resource=volume)
return None
@wrap_check_policy
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume admin metadata completed successfully."),
resource=volume)
return dict(rv)
@wrap_check_policy
def update_volume_admin_metadata(self, context, volume, metadata,
delete=False):
"""Updates or creates volume administration metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_admin_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
self.db.volume_admin_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume admin metadata completed successfully."),
resource=volume)
return _metadata
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot.id)
LOG.info(_LI("Get snapshot metadata completed successfully."),
resource=snapshot)
return snapshot_obj.metadata
def delete_snapshot_metadata(self, context, snapshot, key):
"""Delete the given metadata item from a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot.id)
snapshot_obj.delete_metadata_key(context, key)
LOG.info(_LI("Delete snapshot metadata completed successfully."),
resource=snapshot)
def update_snapshot_metadata(self, context,
snapshot, metadata,
delete=False):
"""Updates or creates snapshot metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = snapshot.metadata
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
snapshot.metadata = _metadata
snapshot.save()
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update snapshot metadata completed successfully."),
resource=snapshot)
return snapshot.metadata
def get_snapshot_metadata_value(self, snapshot, key):
LOG.info(_LI("Get snapshot metadata value not implemented."),
resource=snapshot)
# FIXME(jdg): Huh? Pass?
pass
def get_volumes_image_metadata(self, context):
check_policy(context, 'get_volumes_image_metadata')
db_data = self.db.volume_glance_metadata_get_all(context)
results = collections.defaultdict(dict)
for meta_entry in db_data:
results[meta_entry['volume_id']].update({meta_entry['key']:
meta_entry['value']})
return results
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume image-metadata completed successfully."),
resource=volume)
return {meta_entry.key: meta_entry.value for meta_entry in db_data}
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be '
'available or in-use, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
if not CONF.enable_force_upload and force:
LOG.info(_LI("Force upload to image is disabled, "
"Force option will be ignored."),
resource={'type': 'volume', 'id': volume['id']})
force = False
self._check_volume_availability(volume, force)
glance_core_properties = CONF.glance_core_properties
if glance_core_properties:
try:
volume_image_metadata = self.get_volume_image_metadata(context,
volume)
custom_property_set = (set(volume_image_metadata).difference
(set(glance_core_properties)))
if custom_property_set:
properties = {custom_property:
volume_image_metadata[custom_property]
for custom_property in custom_property_set}
metadata.update(dict(properties=properties))
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
recv_metadata = self.image_service.create(
context, self.image_service._translate_to_glance(metadata))
self.update(context, volume, {'status': 'uploading'})
self.volume_rpcapi.copy_volume_to_image(context,
volume,
recv_metadata)
response = {"id": volume['id'],
"updated_at": volume['updated_at'],
"status": 'uploading',
"display_description": volume['display_description'],
"size": volume['size'],
"volume_type": volume['volume_type'],
"image_id": recv_metadata['id'],
"container_format": recv_metadata['container_format'],
"disk_format": recv_metadata['disk_format'],
"image_name": recv_metadata.get('name', None)}
LOG.info(_LI("Copy image to volume completed successfully."),
resource=volume)
return response
@wrap_check_policy
def extend(self, context, volume, new_size):
if volume['status'] != 'available':
msg = _('Volume %(vol_id)s status must be available '
'to extend, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
size_increase = (int(new_size)) - volume['size']
if size_increase <= 0:
msg = (_("New size for extend must be greater "
"than current size. (current: %(size)s, "
"extended: %(new_size)s).") % {'new_size': new_size,
'size': volume['size']})
raise exception.InvalidInput(reason=msg)
try:
reserve_opts = {'gigabytes': size_increase}
QUOTAS.add_volume_type_opts(context, reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=volume['project_id'],
**reserve_opts)
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
quotas = exc.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
msg = _LE("Quota exceeded for %(s_pid)s, tried to extend volume "
"by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
"already consumed).")
LOG.error(msg, {'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=size_increase,
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
self.update(context, volume, {'status': 'extending'})
self.volume_rpcapi.extend_volume(context, volume, new_size,
reservations)
LOG.info(_LI("Extend volume request issued successfully."),
resource=volume)
@wrap_check_policy
def migrate_volume(self, context, volume, host, force_host_copy,
lock_volume):
"""Migrate the volume to the specified host."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be available or in-use, '
'but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure volume is not part of a migration.
if self._is_volume_migrating(volume):
msg = _("Volume %s is already part of an active "
"migration.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = objects.SnapshotList.get_all_for_volume(context, volume['id'])
if snaps:
msg = _("Volume %s must not have snapshots.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle non-replicated volumes for now
rep_status = volume['replication_status']
if rep_status is not None and rep_status != 'disabled':
msg = _("Volume %s must not be replicated.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume %s must not be part of a consistency "
"group.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure the host is in the list of available hosts
elevated = context.elevated()
topic = CONF.volume_topic
services = objects.ServiceList.get_all_by_topic(
elevated, topic, disabled=False)
found = False
for service in services:
svc_host = volume_utils.extract_host(host, 'backend')
if utils.service_is_up(service) and service.host == svc_host:
found = True
if not found:
msg = _('No available service named %s') % host
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# Make sure the destination host is different than the current one
if host == volume['host']:
msg = _('Destination host must be different '
'than the current host.')
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# When the migration of an available volume starts, both the status
# and the migration status of the volume will be changed.
# If the admin sets lock_volume flag to True, the volume
# status is changed to 'maintenance', telling users
# that this volume is in maintenance mode, and no action is allowed
# on this volume, e.g. attach, detach, retype, migrate, etc.
updates = {'migration_status': 'starting',
'previous_status': volume['status']}
if lock_volume and volume['status'] == 'available':
updates['status'] = 'maintenance'
self.update(context, volume, updates)
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
volume_type_id = volume['volume_type_id']
if volume_type_id:
volume_type = volume_types.get_volume_type(context, volume_type_id)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
self.scheduler_rpcapi.migrate_volume_to_host(context,
CONF.volume_topic,
volume['id'],
host,
force_host_copy,
request_spec)
LOG.info(_LI("Migrate volume request issued successfully."),
resource=volume)
@wrap_check_policy
def migrate_volume_completion(self, context, volume, new_volume, error):
# This is a volume swap initiated by Nova, not Cinder. Nova expects
# us to return the new_volume_id.
if not (volume['migration_status'] or new_volume['migration_status']):
return new_volume['id']
if not volume['migration_status']:
msg = _('Source volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
if not new_volume['migration_status']:
msg = _('Destination volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
expected_status = 'target:%s' % volume['id']
if not new_volume['migration_status'] == expected_status:
msg = (_('Destination has migration_status %(stat)s, expected '
'%(exp)s.') % {'stat': new_volume['migration_status'],
'exp': expected_status})
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Migrate volume completion issued successfully."),
resource=volume)
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
@wrap_check_policy
def update_readonly_flag(self, context, volume, flag):
if volume['status'] != 'available':
msg = _('Volume %(vol_id)s status must be available '
'to update readonly flag, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': six.text_type(flag)})
LOG.info(_LI("Update readonly setting on volume "
"completed successfully."),
resource=volume)
@wrap_check_policy
def retype(self, context, volume, new_type, migration_policy=None):
"""Attempt to modify the type associated with an existing volume."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Unable to update type due to incorrect status: '
'%(vol_status)s on volume: %(vol_id)s. Volume status '
'must be available or '
'in-use.') % {'vol_status': volume['status'],
'vol_id': volume['id']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if self._is_volume_migrating(volume):
msg = (_("Volume %s is already part of an active migration.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if migration_policy and migration_policy not in ['on-demand', 'never']:
msg = _('migration_policy must be \'on-demand\' or \'never\', '
'passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume must not be part of a consistency group.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Support specifying volume type by ID or name
try:
if uuidutils.is_uuid_like(new_type):
vol_type = volume_types.get_volume_type(context, new_type)
else:
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
vol_type_qos_id = vol_type['qos_specs_id']
old_vol_type = None
old_vol_type_id = volume['volume_type_id']
old_vol_type_qos_id = None
# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = _('New volume_type same as original: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if volume['volume_type_id']:
old_vol_type = volume_types.get_volume_type(
context, old_vol_type_id)
old_vol_type_qos_id = old_vol_type['qos_specs_id']
# We don't support changing encryption requirements yet
old_enc = volume_types.get_volume_type_encryption(context,
old_vol_type_id)
new_enc = volume_types.get_volume_type_encryption(context,
vol_type_id)
if old_enc != new_enc:
msg = _('Retype cannot change encryption requirements.')
raise exception.InvalidInput(reason=msg)
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
if (volume['status'] != 'available' and
old_vol_type_qos_id != vol_type_qos_id):
for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
if qos_id:
specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
if specs['consumer'] != 'back-end':
msg = _('Retype cannot change front-end qos specs for '
'in-use volume: %s.') % volume['id']
raise exception.InvalidInput(reason=msg)
# We're checking here in so that we can report any quota issues as
# early as possible, but won't commit until we change the type. We
# pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(context, volume,
vol_type_id)
self.update(context, volume, {'status': 'retyping',
'previous_status': volume['status']})
request_spec = {'volume_properties': volume,
'volume_id': volume['id'],
'volume_type': vol_type,
'migration_policy': migration_policy,
'quota_reservations': reservations}
self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
request_spec=request_spec,
filter_properties={})
LOG.info(_LI("Retype volume request issued successfully."),
resource=volume)
def manage_existing(self, context, host, ref, name=None, description=None,
volume_type=None, metadata=None,
availability_zone=None, bootable=False):
if availability_zone is None:
elevated = context.elevated()
try:
svc_host = volume_utils.extract_host(host, 'backend')
service = objects.Service.get_by_host_and_topic(
elevated, svc_host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to find service: %(service)s for '
'given host: %(host)s.'),
{'service': CONF.volume_topic, 'host': host})
availability_zone = service.get('availability_zone')
manage_what = {
'context': context,
'name': name,
'description': description,
'host': host,
'ref': ref,
'volume_type': volume_type,
'metadata': metadata,
'availability_zone': availability_zone,
'bootable': bootable,
}
try:
flow_engine = manage_existing.get_flow(self.scheduler_rpcapi,
self.db,
manage_what)
except Exception:
msg = _('Failed to manage api volume flow.')
LOG.exception(msg)
raise exception.CinderException(msg)
# Attaching this listener will capture all of the notifications that
# taskflow sends out and redirect them to a more useful log for
# cinder's debugging (or error reporting) usage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vol_ref = flow_engine.storage.fetch('volume')
LOG.info(_LI("Manage volume request issued successfully."),
resource=vol_ref)
return vol_ref
def manage_existing_snapshot(self, context, ref, volume,
name=None, description=None,
metadata=None):
host = volume_utils.extract_host(volume['host'])
try:
self.db.service_get_by_host_and_topic(
context.elevated(), host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to find service: %(service)s for '
'given host: %(host)s.'),
{'service': CONF.volume_topic, 'host': host})
snapshot_object = self.create_snapshot_in_db(context, volume, name,
description, False,
metadata, None)
self.volume_rpcapi.manage_existing_snapshot(context, snapshot_object,
ref, host)
return snapshot_object
# Replication V2 methods ##
# NOTE(jdg): It might be kinda silly to propogate the named
# args with defaults all the way down through rpc into manager
# but for now the consistency is useful, and there may be
# some usefulness in the future (direct calls in manager?)
# NOTE(jdg): Relying solely on the volume-type quota mechanism
# need to consider looking at how we handle configured backends
# WRT quotas, do they count against normal quotas or not? For
# now they're a special resource, so no.
@wrap_check_policy
def enable_replication(self, ctxt, volume):
# NOTE(jdg): details like sync vs async
# and replica count are to be set via the
# volume-type and config files.
# Get a fresh ref from db and check status
volume = self.db.volume_get(ctxt, volume['id'])
# NOTE(jdg): Set a valid status as a var to minimize errors via typos
# also, use a list, we may want to add to it some day
# TODO(jdg): Move these up to a global list for each call and ban the
# free form typing of states and state checks going forward
# NOTE(jdg): There may be a need for some backends to allow this
# call to driver regardless of replication_status, most likely
# this indicates an issue with the driver, but might be useful
# cases to consider modifying this for in the future.
valid_rep_status = ['disabled']
rep_status = volume.get('replication_status', valid_rep_status[0])
if rep_status not in valid_rep_status:
msg = (_("Invalid status to enable replication. "
"valid states are: %(valid_states)s, "
"current replication-state is: %(curr_state)s."),
{'valid_states': valid_rep_status,
'curr_state': rep_status})
raise exception.InvalidVolume(reason=msg)
vref = self.db.volume_update(ctxt,
volume['id'],
{'replication_status': 'enabling'})
self.volume_rpcapi.enable_replication(ctxt, vref)
@wrap_check_policy
def disable_replication(self, ctxt, volume):
valid_disable_status = ['disabled', 'enabled']
# NOTE(jdg): Just use disabled here (item 1 in the list) this
# way if someone says disable_rep on a volume that's not being
# replicated we just say "ok, done"
rep_status = volume.get('replication_status', valid_disable_status[0])
if rep_status not in valid_disable_status:
msg = (_("Invalid status to disable replication. "
"valid states are: %(valid_states)s, "
"current replication-state is: %(curr_state)s."),
{'valid_states': valid_disable_status,
'curr_state': rep_status})
raise exception.InvalidVolume(reason=msg)
vref = self.db.volume_update(ctxt,
volume['id'],
{'replication_status': 'disabling'})
self.volume_rpcapi.disable_replication(ctxt, vref)
@wrap_check_policy
def failover_replication(self,
ctxt,
volume,
secondary=None):
# FIXME(jdg): What is the secondary argument?
# for managed secondaries that's easy; it's a host
# for others, it's tricky; will propose a format for
# secondaries that includes an ID/Name that can be
# used as a handle
valid_failover_status = ['enabled']
rep_status = volume.get('replication_status', 'na')
if rep_status not in valid_failover_status:
msg = (_("Invalid status to failover replication. "
"valid states are: %(valid_states)s, "
"current replication-state is: %(curr_state)s."),
{'valid_states': valid_failover_status,
'curr_state': rep_status})
raise exception.InvalidVolume(reason=msg)
vref = self.db.volume_update(
ctxt,
volume['id'],
{'replication_status': 'enabling_secondary'})
self.volume_rpcapi.failover_replication(ctxt,
vref,
secondary)
@wrap_check_policy
def list_replication_targets(self, ctxt, volume):
# NOTE(jdg): This collects info for the specified volume
# it is NOT an error if the volume is not being replicated
# also, would be worth having something at a backend/host
# level to show an admin how a backend is configured.
return self.volume_rpcapi.list_replication_targets(ctxt, volume)
class HostAPI(base.Base):
def __init__(self):
super(HostAPI, self).__init__()
"""Sub-set of the Volume Manager API for managing host operations."""
def set_host_enabled(self, context, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
raise NotImplementedError()
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
raise NotImplementedError()
def host_power_action(self, context, host, action):
raise NotImplementedError()
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window.
On start, it triggers volume evacuation.
"""
raise NotImplementedError()
| {
"content_hash": "11c06addc42067a723be5a8c89a1bd04",
"timestamp": "",
"source": "github",
"line_count": 1708,
"max_line_length": 79,
"avg_line_length": 45.24882903981265,
"alnum_prop": 0.5339716633240603,
"repo_name": "takeshineshiro/cinder",
"id": "3b8cfece435bdda280e80602125d7690faced2f8",
"size": "78017",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/volume/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13094130"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
} |
from .database import create_database_pool, Database, transaction
from .session import setup_aiohttp_session
from .auth import userdata_from_web_request, userdata_from_rest_request, \
web_authenticated_users_only, web_maybe_authenticated_user, rest_authenticated_users_only, \
web_authenticated_developers_only, rest_authenticated_developers_only
from .csrf import new_csrf_token, check_csrf_token
from .auth_utils import insert_user, create_session
__all__ = [
'create_database_pool',
'Database',
'setup_aiohttp_session',
'userdata_from_web_request',
'userdata_from_rest_request',
'web_authenticated_users_only',
'web_maybe_authenticated_user',
'web_authenticated_developers_only',
'rest_authenticated_users_only',
'rest_authenticated_developers_only',
'new_csrf_token',
'check_csrf_token',
'insert_user',
'create_session',
'transaction'
]
| {
"content_hash": "13624ba54c892899daf0b6d45c46dcea",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 96,
"avg_line_length": 36.4,
"alnum_prop": 0.7241758241758242,
"repo_name": "danking/hail",
"id": "20b58d424da4dc5587381f2fc6f11c1bc8e62309",
"size": "910",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "gear/gear/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "289"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CSS",
"bytes": "29124"
},
{
"name": "Dockerfile",
"bytes": "13073"
},
{
"name": "Emacs Lisp",
"bytes": "252"
},
{
"name": "HTML",
"bytes": "151709"
},
{
"name": "Java",
"bytes": "32302"
},
{
"name": "JavaScript",
"bytes": "3309"
},
{
"name": "Jupyter Notebook",
"bytes": "162395"
},
{
"name": "Makefile",
"bytes": "73914"
},
{
"name": "Python",
"bytes": "4149266"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "9075"
},
{
"name": "Scala",
"bytes": "4426573"
},
{
"name": "Shell",
"bytes": "49103"
},
{
"name": "TeX",
"bytes": "7125"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
} |
import unittest
import utils
from threading import Lock
from LogicaNegocios import *
import Globales
Globales.material_lock = Lock()
class PruebasUnitarias(unittest.TestCase):
@classmethod
def setUpClass(cls):
print "============Guardando copia tablas y estableciendo escenario de datos=============="
print
db = utils.ConnectDB()
db.execute('DROP TABLE IF EXISTS almacenes_copy')
db.execute('CREATE TABLE almacenes_copy LIKE almacenes')
db.execute('INSERT INTO almacenes_copy SELECT * FROM almacenes')
db.execute('TRUNCATE almacenes')
db.execute('COMMIT')
db.execute('DROP TABLE IF EXISTS materiales_copy')
db.execute('CREATE TABLE materiales_copy LIKE materiales')
db.execute('INSERT INTO materiales_copy SELECT * FROM materiales')
db.execute('TRUNCATE materiales')
db.execute('COMMIT')
db.execute('DROP TABLE IF EXISTS inventario_copy')
db.execute('CREATE TABLE inventario_copy LIKE inventario')
db.execute('INSERT INTO inventario_copy SELECT * FROM inventario')
db.execute('TRUNCATE inventario')
db.execute('COMMIT')
db.execute('INSERT INTO materiales (idmat,descripcion,grupo1,grupo2,grupo3,grupo4,unidad) VALUES (%s,%s,%s,%s,%s,%s,%s)',
('P1','Material Prueba 1', 'G1','G2','G3','G4','kg')
)
db.execute('INSERT INTO almacenes (idalm,descripcion,data_group) VALUES (%s,%s,%s)',
('A1','almacen prueba 1', 'DG1')
)
db.execute('COMMIT')
@classmethod
def tearDownClass(cls):
print
print "=================Recuperando tablas originales============"
db = utils.ConnectDB()
db.execute('TRUNCATE inventario')
db.execute('INSERT INTO inventario SELECT * FROM inventario_copy')
db.execute('DROP TABLE inventario_copy')
db.execute('COMMIT')
db.execute('TRUNCATE materiales')
db.execute('INSERT INTO materiales SELECT * FROM materiales_copy')
db.execute('DROP TABLE materiales_copy')
db.execute('COMMIT')
db.execute('TRUNCATE almacenes')
db.execute('INSERT INTO almacenes SELECT * FROM almacenes_copy')
db.execute('DROP TABLE almacenes_copy')
db.execute('COMMIT')
def test_a_entrada_material_sin_saldo(self):
modificar_inventario_material('A1','P1',3.5,'kg')
db = utils.ConnectDB()
db.execute('SELECT cantidad FROM inventario WHERE idalm = %s and idmat = %s',
('A1','P1')
)
cantidad, = db.fetchone().values()
test_value = 3.5
self.assertEqual(cantidad,test_value,msg="El inventario deberia dar {}kg el resultado fue {}kg".format(test_value,cantidad))
def test_b_entrada_material_con_saldo(self):
modificar_inventario_material('A1','P1',3,'kg')
db = utils.ConnectDB()
db.execute('SELECT cantidad FROM inventario WHERE idalm = %s and idmat = %s',
('A1','P1')
)
cantidad, = db.fetchone().values()
test_value = 6.5
self.assertEqual(cantidad,test_value,msg="El inventario deberia dar {}kg el resultado fue {}kg".format(test_value,cantidad))
def test_c_salida_material_con_saldo(self):
modificar_inventario_material('A1','P1',-6.5,'kg')
db = utils.ConnectDB()
db.execute('SELECT cantidad FROM inventario WHERE idalm = %s and idmat = %s',
('A1','P1')
)
cantidad, = db.fetchone().values()
test_value = 0.0
self.assertEqual(cantidad,test_value,msg="El inventario deberia dar {}kg el resultado fue {}kg".format(test_value,cantidad))
def test_d_salida_material_sin_saldo(self):
with self.assertRaises(Exception):
modificar_inventario_material('A1','P1',-3,'kg')
##
## Monkeypatch para hacer debugging usando pdb
##
## Para ejecutarlo usar pdb prueba.py
##
#unittest.TestCase.run = lambda self,*args,**kw: unittest.TestCase.debug(self)
unittest.main()
| {
"content_hash": "b1301d8819242b83cd5564bd765b12eb",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 132,
"avg_line_length": 34.914529914529915,
"alnum_prop": 0.621297429620563,
"repo_name": "andres-hurtado-lopez/naranjaverdeprod",
"id": "3d98eb1c80831703535a32d71a6e81b3fa40f5ce",
"size": "4132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LogicaNegocios_pruebas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "214"
},
{
"name": "CSS",
"bytes": "313"
},
{
"name": "HTML",
"bytes": "179287"
},
{
"name": "JavaScript",
"bytes": "120099"
},
{
"name": "Python",
"bytes": "162953"
},
{
"name": "Shell",
"bytes": "1767"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.optimizers import RMSprop
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.datasets import imdb
'''
This example demonstrates the use of Convolution1D
for text classification.
Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python imdb_cnn.py
Get to 0.8330 test accuracy after 3 epochs. 100s/epoch on K520 GPU.
'''
# set parameters:
max_features = 5000
maxlen = 100
batch_size = 32
embedding_dims = 100
nb_filter = 250
filter_length = 3
hidden_dims = 250
nb_epoch = 3
print("Loading data...")
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features,
test_split=0.2)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model.add(Dropout(0.25))
# we add a Convolution1D, which will learn nb_filter
# word group filters of size filter_length:
model.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode="valid",
activation="relu",
subsample_length=1))
# we use standard max pooling (halving the output of the previous layer):
model.add(MaxPooling1D(pool_length=2))
# We flatten the output of the conv layer, so that we can add a vanilla dense layer:
model.add(Flatten())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.25))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode="binary")
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, validation_data=(X_test, y_test))
| {
"content_hash": "b0e3188d10de3de74e6690f83d300ffe",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 123,
"avg_line_length": 34.4078947368421,
"alnum_prop": 0.7131931166347992,
"repo_name": "rajegannathan/grasp-lift-eeg-cat-dog-solution-updated",
"id": "54096ab5601ee52b342f0fa929df4ed89fce983d",
"size": "2615",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python-packages/keras-0.2.0/examples/imdb_cnn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "113"
},
{
"name": "C",
"bytes": "9257"
},
{
"name": "C++",
"bytes": "410482"
},
{
"name": "CSS",
"bytes": "3812"
},
{
"name": "Makefile",
"bytes": "23871"
},
{
"name": "PHP",
"bytes": "2068"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5993790"
},
{
"name": "R",
"bytes": "145799"
},
{
"name": "Shell",
"bytes": "8953"
},
{
"name": "TeX",
"bytes": "912"
}
],
"symlink_target": ""
} |
from django import forms
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.contrib import admin
from django.contrib.auth.models import Group
from django_messages.utils import get_user_model
User = get_user_model()
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
from django_messages.models import Message
class MessageAdminForm(forms.ModelForm):
"""
Custom AdminForm to enable messages to groups and all users.
"""
recipient = forms.ModelChoiceField(
label=_('Recipient'), queryset=User.objects.all(), required=True)
group = forms.ChoiceField(label=_('group'), required=False,
help_text=_('Creates the message optionally for all users or a group of users.'))
def __init__(self, *args, **kwargs):
super(MessageAdminForm, self).__init__(*args, **kwargs)
self.fields['group'].choices = self._get_group_choices()
def _get_group_choices(self):
return [('', u'---------'), ('all', _('All users'))] + \
[(group.pk, group.name) for group in Group.objects.all()]
class Meta:
model = Message
class MessageAdmin(admin.ModelAdmin):
form = MessageAdminForm
fieldsets = (
(None, {
'fields': (
'sender',
('recipient', 'group'),
),
}),
(_('Message'), {
'fields': (
'parent_msg',
'subject', 'body',
),
'classes': ('monospace' ),
}),
(_('Date/time'), {
'fields': (
'sent_at', 'read_at', 'replied_at',
'sender_deleted_at', 'recipient_deleted_at',
),
'classes': ('collapse', 'wide'),
}),
)
list_display = ('subject', 'sender', 'recipient', 'sent_at', 'read_at')
list_filter = ('sent_at', 'sender', 'recipient')
search_fields = ('subject', 'body')
def save_model(self, request, obj, form, change):
"""
Saves the message for the recipient and looks in the form instance
for other possible recipients. Prevents duplication by excludin the
original recipient from the list of optional recipients.
When changing an existing message and choosing optional recipients,
the message is effectively resent to those users.
"""
obj.save()
if notification:
# Getting the appropriate notice labels for the sender and recipients.
if obj.parent_msg is None:
sender_label = 'messages_sent'
recipients_label = 'messages_received'
else:
sender_label = 'messages_replied'
recipients_label = 'messages_reply_received'
# Notification for the sender.
notification.send([obj.sender], sender_label, {'message': obj,})
if form.cleaned_data['group'] == 'all':
# send to all users
recipients = User.objects.exclude(pk=obj.recipient.pk)
else:
# send to a group of users
recipients = []
group = form.cleaned_data['group']
if group:
group = Group.objects.get(pk=group)
recipients.extend(
list(group.user_set.exclude(pk=obj.recipient.pk)))
# create messages for all found recipients
for user in recipients:
obj.pk = None
obj.recipient = user
obj.save()
if notification:
# Notification for the recipient.
notification.send([user], recipients_label, {'message' : obj,})
admin.site.register(Message, MessageAdmin)
| {
"content_hash": "bac240075c861aedf62dcd033a670d3a",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 89,
"avg_line_length": 34.872727272727275,
"alnum_prop": 0.5664754953076121,
"repo_name": "michaelmior/django-messages",
"id": "d9b88e1d15783f98c55a6d6f7ec6096d4e0015a0",
"size": "3836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_messages/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "55444"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from shipments.db_views import drop_views, add_views
class Migration(migrations.Migration):
dependencies = [
('shipments', '0013_merge'),
]
operations = [
migrations.RunPython(drop_views, add_views),
migrations.RenameModel("Location", "PackageScan"),
migrations.AlterModelTable(
name='packagescan',
table='shipments_location',
),
migrations.RenameField(
model_name='package',
old_name='last_location',
new_name='last_scan',
),
migrations.RenameField(
model_name='package',
old_name='last_location_status_label',
new_name='last_scan_status_label',
),
migrations.RenameField(
model_name='shipment',
old_name='last_location_status_label',
new_name='last_scan_status_label',
),
migrations.RunPython(add_views, drop_views),
]
| {
"content_hash": "2a590cc608f9e34b7fd64be69799d1c5",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 58,
"avg_line_length": 28.43243243243243,
"alnum_prop": 0.5826996197718631,
"repo_name": "stbenjam/CTS",
"id": "b70c5d5ceb4a69a9238ee8affb1fe28fddf58b3e",
"size": "1076",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "shipments/migrations/0014_rename_location_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "195046"
},
{
"name": "HTML",
"bytes": "130015"
},
{
"name": "JavaScript",
"bytes": "603681"
},
{
"name": "Python",
"bytes": "499335"
},
{
"name": "Shell",
"bytes": "1430"
}
],
"symlink_target": ""
} |
import re
import time
from gcp_common import BaseTest, event_data
from googleapiclient.errors import HttpError
class InstanceTest(BaseTest):
def test_instance_query(self):
factory = self.replay_flight_data('instance-query')
p = self.load_policy(
{'name': 'all-instances',
'resource': 'gcp.instance'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 4)
def test_instance_get(self):
factory = self.replay_flight_data('instance-get')
p = self.load_policy(
{'name': 'one-instance',
'resource': 'gcp.instance'},
session_factory=factory)
instance = p.resource_manager.get_resource(
{"instance_id": "2966820606951926687",
"project_id": "custodian-1291",
"resourceName": "projects/custodian-1291/zones/us-central1-b/instances/c7n-jenkins",
"zone": "us-central1-b"})
self.assertEqual(instance['status'], 'RUNNING')
def test_stop_instance(self):
project_id = 'cloud-custodian'
factory = self.replay_flight_data('instance-stop', project_id=project_id)
p = self.load_policy(
{'name': 'istop',
'resource': 'gcp.instance',
'filters': [{'name': 'instance-1'}, {'status': 'RUNNING'}],
'actions': ['stop']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = p.resource_manager.get_client()
result = client.execute_query(
'list', {'project': project_id,
'filter': 'name = instance-1',
'zone': resources[0]['zone'].rsplit('/', 1)[-1]})
self.assertEqual(result['items'][0]['status'], 'STOPPING')
def test_start_instance(self):
project_id = 'cloud-custodian'
factory = self.replay_flight_data('instance-start', project_id=project_id)
p = self.load_policy(
{'name': 'istart',
'resource': 'gcp.instance',
'filters': [{'tag:env': 'dev'}, {'status': 'TERMINATED'}],
'actions': ['start']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(3)
client = p.resource_manager.get_client()
result = client.execute_query(
'list', {'project': project_id,
'filter': 'labels.env=dev',
'zone': resources[0]['zone'].rsplit('/', 1)[-1]})
self.assertEqual(result['items'][0]['status'], 'PROVISIONING')
def test_delete_instance(self):
project_id = 'cloud-custodian'
factory = self.replay_flight_data('instance-terminate', project_id=project_id)
p = self.load_policy(
{'name': 'iterm',
'resource': 'gcp.instance',
'filters': [{'name': 'instance-1'}],
'actions': ['delete']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(1)
client = p.resource_manager.get_client()
result = client.execute_query(
'list', {'project': project_id,
'filter': 'name = instance-1',
'zone': resources[0]['zone'].rsplit('/', 1)[-1]})
self.assertEqual(result['items'][0]['status'], 'STOPPING')
def test_label_instance(self):
project_id = 'team-saasops'
factory = self.replay_flight_data('instance-label', project_id=project_id)
p = self.load_policy(
{'name': 'ilabel',
'resource': 'gcp.instance',
'filters': [{'name': 'test-ingwar'}],
'actions': [{'type': 'set-labels',
'labels': {'test_label': 'test_value'}}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(1)
client = p.resource_manager.get_client()
result = client.execute_query(
'list', {'project': project_id,
'filter': 'name = test-ingwar',
'zone': resources[0]['zone'].rsplit('/', 1)[-1]})
self.assertEqual(result['items'][0]['labels']['test_label'], 'test_value')
def test_mark_for_op_instance(self):
project_id = 'team-saasops'
factory = self.replay_flight_data('instance-label', project_id=project_id)
p = self.load_policy(
{'name': 'ilabel',
'resource': 'gcp.instance',
'filters': [{'type': 'marked-for-op',
'op': 'stop'}],
'actions': [{'type': 'mark-for-op',
'op': 'start'}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(1)
client = p.resource_manager.get_client()
result = client.execute_query(
'list', {'project': project_id,
'filter': 'name = test-ingwar',
'zone': resources[0]['zone'].rsplit('/', 1)[-1]})
self.assertTrue(result['items'][0]['labels']['custodian_status']
.startswith("resource_policy-start"))
def test_detach_disks_from_instance(self):
project_id = 'custodian-tests'
factory = self.replay_flight_data('instance-detach-disks', project_id=project_id)
p = self.load_policy(
{'name': 'idetach',
'resource': 'gcp.instance',
'filters': [{'name': 'test-ingwar'}],
'actions': [{'type': 'detach-disks'}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(5)
client = p.resource_manager.get_client()
result = client.execute_query(
'list', {'project': project_id,
'filter': 'name = test-ingwar',
'zone': resources[0]['zone'].rsplit('/', 1)[-1]})
self.assertIsNone(result['items'][0].get("disks"))
class DiskTest(BaseTest):
def test_disk_query(self):
factory = self.replay_flight_data('disk-query', project_id='custodian-1291')
p = self.load_policy(
{'name': 'all-disks',
'resource': 'gcp.disk'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 6)
def test_disk_snapshot(self):
factory = self.replay_flight_data('disk-snapshot', project_id='custodian-1291')
p = self.load_policy(
{'name': 'all-images',
'resource': 'gcp.disk',
'filters': [
{'name': 'c7n-jenkins'}],
'actions': ['snapshot']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_disk_snapshot_add_date(self):
factory = self.replay_flight_data('disk-snapshot', project_id='custodian-1291')
p = self.load_policy(
{'name': 'all-images',
'resource': 'gcp.disk',
'filters': [
{'name': 'c7n-jenkins'}],
'actions': [{'type': 'snapshot', 'name_format': "{disk[name]:.50}-{now:%Y-%m-%d}"}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_disk_delete(self):
project_id = 'custodian-1291'
resource_name = 'c7n-jenkins'
factory = self.replay_flight_data('disk-delete', project_id=project_id)
policy = self.load_policy(
{'name': 'all-images',
'resource': 'gcp.disk',
'filters': [
{'name': resource_name}],
'actions': ['delete']},
session_factory=factory)
resources = policy.run()
self.assertEqual(resources[0]['name'], resource_name)
client = policy.resource_manager.get_client()
zone = resources[0]['zone'].rsplit('/', 1)[-1]
result = client.execute_query(
'list', {'project': project_id,
'filter': 'name = instance-1',
'zone': zone})
self.assertEqual(len(result['items']["zones/{}".format(zone)]['disks']), 0)
def test_label_disk(self):
project_id = 'team-saasops'
factory = self.replay_flight_data('disk-label', project_id=project_id)
p = self.load_policy(
{'name': 'disk-label',
'resource': 'gcp.disk',
'filters': [{'name': 'test-ingwar'}],
'actions': [{'type': 'set-labels',
'labels': {'test_label': 'test_value'}}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(1)
client = p.resource_manager.get_client()
result = client.execute_query(
'list', {'project': project_id,
'filter': 'name = test-ingwar',
'zone': resources[0]['zone'].rsplit('/', 1)[-1]})
self.assertEqual(result['items'][0]['labels']['test_label'], 'test_value')
class SnapshotTest(BaseTest):
def test_snapshot_query(self):
factory = self.replay_flight_data(
'snapshot-query', project_id='cloud-custodian')
p = self.load_policy(
{'name': 'all-disks',
'resource': 'gcp.snapshot'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_snapshot_delete(self):
factory = self.replay_flight_data(
'snapshot-delete', project_id='cloud-custodian')
p = self.load_policy(
{'name': 'all-disks',
'resource': 'gcp.snapshot',
'filters': [
{'name': 'snapshot-1'}],
'actions': ['delete']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
class ImageTest(BaseTest):
def test_image_query(self):
factory = self.replay_flight_data(
'image-query', project_id='cloud-custodian')
p = self.load_policy(
{'name': 'all-images',
'resource': 'gcp.image'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_image_delete(self):
factory = self.replay_flight_data(
'image-delete', project_id='cloud-custodian')
p = self.load_policy(
{'name': 'all-images',
'resource': 'gcp.image',
'filters': [
{'name': 'image-1'}],
'actions': ['delete']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
class InstanceTemplateTest(BaseTest):
def test_instance_template_query(self):
project_id = 'cloud-custodian'
resource_name = 'custodian-instance-template'
session_factory = self.replay_flight_data(
'instance-template-query', project_id=project_id)
policy = self.load_policy(
{'name': 'gcp-instance-template-dryrun',
'resource': 'gcp.instance-template'},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(resources[0]['name'], resource_name)
def test_instance_template_get(self):
resource_name = 'custodian-instance-template'
session_factory = self.replay_flight_data(
'instance-template-get')
policy = self.load_policy(
{'name': 'gcp-instance-template-audit',
'resource': 'gcp.instance-template',
'mode': {
'type': 'gcp-audit',
'methods': ['beta.compute.instanceTemplates.insert']
}},
session_factory=session_factory)
exec_mode = policy.get_execution_mode()
event = event_data('instance-template-create.json')
resources = exec_mode.run(event, None)
self.assertEqual(resources[0]['name'], resource_name)
def test_instance_template_delete(self):
project_id = 'cloud-custodian'
resource_name = 'instance-template-to-delete'
resource_full_name = 'projects/%s/global/instanceTemplates/%s' % (project_id, resource_name)
session_factory = self.replay_flight_data(
'instance-template-delete', project_id=project_id)
policy = self.load_policy(
{'name': 'gcp-instance-template-delete',
'resource': 'gcp.instance-template',
'filters': [{
'type': 'value',
'key': 'name',
'value': resource_name
}],
'actions': [{'type': 'delete'}]},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(resources[0]['name'], resource_name)
if self.recording:
time.sleep(1)
client = policy.resource_manager.get_client()
try:
result = client.execute_query(
'get', {'project': project_id,
'instanceTemplate': resource_name})
self.fail('found deleted resource: %s' % result)
except HttpError as e:
self.assertTrue(re.match(".*The resource '%s' was not found.*" %
resource_full_name, str(e)))
class AutoscalerTest(BaseTest):
def test_autoscaler_query(self):
project_id = 'cloud-custodian'
resource_name = 'micro-instance-group-1-to-10'
session_factory = self.replay_flight_data('autoscaler-query', project_id=project_id)
policy = self.load_policy(
{'name': 'gcp-autoscaler-dryrun',
'resource': 'gcp.autoscaler'},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(resources[0]['name'], resource_name)
def test_autoscaler_get(self):
resource_name = 'instance-group-1'
session_factory = self.replay_flight_data('autoscaler-get')
policy = self.load_policy(
{'name': 'gcp-autoscaler-audit',
'resource': 'gcp.autoscaler',
'mode': {
'type': 'gcp-audit',
'methods': ['v1.compute.autoscalers.insert']
}},
session_factory=session_factory)
exec_mode = policy.get_execution_mode()
event = event_data('autoscaler-insert.json')
resources = exec_mode.run(event, None)
self.assertEqual(resources[0]['name'], resource_name)
def test_autoscaler_set(self):
project_id = 'mitrop-custodian'
factory = self.replay_flight_data('autoscaler-set', project_id=project_id)
p = self.load_policy(
{'name': 'gcp-autoscaler-set',
'resource': 'gcp.autoscaler',
'filters': [{'name': 'instance-group-2'}],
'actions': [{'type': 'set',
'coolDownPeriodSec': 30,
'cpuUtilization': {
'utilizationTarget': 0.7
},
'loadBalancingUtilization': {
'utilizationTarget': 0.7
},
'minNumReplicas': 1,
'maxNumReplicas': 4
}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(3)
client = p.resource_manager.get_client()
result = client.execute_query(
'list', {'project': project_id,
'zone': 'us-central1-a',
'filter': 'name = instance-group-2'})
result_policy = result['items'][0]['autoscalingPolicy']
self.assertEqual(result_policy['coolDownPeriodSec'], 30)
self.assertEqual(result_policy['cpuUtilization']['utilizationTarget'], 0.7)
self.assertEqual(result_policy['loadBalancingUtilization']['utilizationTarget'], 0.7)
self.assertEqual(result_policy['minNumReplicas'], 1)
self.assertEqual(result_policy['maxNumReplicas'], 4)
| {
"content_hash": "26a95830683797f134393ff3b19667fb",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 100,
"avg_line_length": 38.33718244803695,
"alnum_prop": 0.5328915662650603,
"repo_name": "capitalone/cloud-custodian",
"id": "f67d00050b63a3b8dd919e55d9a5f66dde0af538",
"size": "16723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/c7n_gcp/tests/test_compute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
import micropython
# these functions are not always available
if not hasattr(micropython, "mem_info"):
print("SKIP")
else:
micropython.mem_info()
micropython.mem_info(1)
micropython.qstr_info()
micropython.qstr_info(1)
| {
"content_hash": "6d9de9d81701b3085fc441d015f83f3e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 42,
"avg_line_length": 24,
"alnum_prop": 0.7125,
"repo_name": "pramasoul/micropython",
"id": "9df341fbb833484f7b85f2055ec6ba1f578bccdc",
"size": "289",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "tests/micropython/meminfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "55179"
},
{
"name": "C",
"bytes": "35133638"
},
{
"name": "C++",
"bytes": "703228"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "75592"
},
{
"name": "Objective-C",
"bytes": "391937"
},
{
"name": "Python",
"bytes": "588844"
},
{
"name": "Shell",
"bytes": "4829"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('imager_profile', '0005_merge_20170119_1241'),
('imager_profile', '0007_auto_20170124_1644'),
]
operations = [
]
| {
"content_hash": "1901164bc38804bbd899b1c7ebccaede",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 55,
"avg_line_length": 20,
"alnum_prop": 0.6464285714285715,
"repo_name": "clair3st/django-imager",
"id": "08c3b9c50faf53ee68d5e536c0589ce43a573d80",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_profile/migrations/0008_merge_20170130_2253.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4601"
},
{
"name": "HTML",
"bytes": "20459"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "93575"
}
],
"symlink_target": ""
} |
"""Python utilities required by Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import codecs
import marshal
import os
import re
import sys
import time
import types as python_types
import numpy as np
import six
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_GLOBAL_CUSTOM_OBJECTS = {}
@tf_export('keras.utils.CustomObjectScope')
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject` (e.g. a class):
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
@tf_export('keras.utils.custom_object_scope')
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
Returns:
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
@tf_export('keras.utils.get_custom_objects')
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
@tf_export('keras.utils.serialize_keras_object')
def serialize_keras_object(instance):
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
if hasattr(instance, 'get_config'):
return {
'class_name': instance.__class__.__name__,
'config': instance.get_config()
}
if hasattr(instance, '__name__'):
return instance.__name__
else:
raise ValueError('Cannot serialize', instance)
@tf_export('keras.utils.deserialize_keras_object')
def deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
if 'class_name' not in config or 'config' not in config:
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getfullargspec(cls.from_config)
custom_objects = custom_objects or {}
if 'custom_objects' in arg_spec.args:
return cls.from_config(
config['config'],
custom_objects=dict(
list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
with CustomObjectScope(custom_objects):
return cls.from_config(config['config'])
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with CustomObjectScope(custom_objects):
return cls(**config['config'])
elif isinstance(identifier, six.string_types):
function_name = identifier
if custom_objects and function_name in custom_objects:
fn = custom_objects.get(function_name)
elif function_name in _GLOBAL_CUSTOM_OBJECTS:
fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
else:
fn = module_objects.get(function_name)
if fn is None:
raise ValueError('Unknown ' + printable_module_name + ':' +
function_name)
return fn
else:
raise ValueError('Could not interpret serialized ' + printable_module_name +
': ' + identifier)
def func_dump(func):
"""Serializes a user defined function.
Arguments:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == 'nt':
raw_code = marshal.dumps(func.__code__).replace(b'\\', b'/')
code = codecs.encode(raw_code, 'base64').decode('ascii')
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, 'base64').decode('ascii')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Arguments:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Arguments:
value: Any value that needs to be casted to the cell type
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
# pylint: disable=pointless-statement
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
else:
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode('ascii'), 'base64')
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode('raw_unicode_escape')
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure)
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
Arguments:
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name`
but the function accepts a `**kwargs` argument.
Returns:
bool, whether `fn` accepts a `name` keyword argument.
"""
arg_spec = tf_inspect.getfullargspec(fn)
if accept_all and arg_spec.varkw is not None:
return True
return name in arg_spec.args
@tf_export('keras.utils.Progbar')
class Progbar(object):
"""Displays a progress bar.
Arguments:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1, interval=0.05,
stateful_metrics=None):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules or
'posix' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
def update(self, current, values=None):
"""Updates the progress bar.
Arguments:
current: Index of current step.
values: List of tuples:
`(name, value_for_last_step)`.
If `name` is in `stateful_metrics`,
`value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
"""
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
if k not in self._values:
self._values[k] = [v * (current - self._seen_so_far),
current - self._seen_so_far]
else:
self._values[k][0] += v * (current - self._seen_so_far)
self._values[k][1] += (current - self._seen_so_far)
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and
self.target is not None and current < self.target):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%d [' % (numdigits, self.target)
bar = barstr % current
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600,
(eta % 3600) // 60,
eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
else:
if time_per_unit >= 1:
info += ' %.0fs/step' % time_per_unit
elif time_per_unit >= 1e-3:
info += ' %.0fms/step' % (time_per_unit * 1e3)
else:
info += ' %.0fus/step' % (time_per_unit * 1e6)
for k in self._values_order:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is None or current >= self.target:
for k in self._values_order:
info += ' - %s:' % k
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
def make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Arguments:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)]
def slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `slice_arrays(x, indices)`
Arguments:
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
Returns:
A slice of the array(s).
Raises:
ValueError: If the value of start is a list and stop is not None.
"""
if arrays is None:
return [None]
if isinstance(start, list) and stop is not None:
raise ValueError('The stop argument has to be None if the value of start '
'is a list.')
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
else:
return [None if x is None else x[start:stop] for x in arrays]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
elif hasattr(start, '__getitem__'):
return arrays[start:stop]
else:
return [None]
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Arguments:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def object_list_uid(object_list):
"""Creates a single string from object ids."""
object_list = nest.flatten(object_list)
return ', '.join([str(abs(id(x))) for x in object_list])
def to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
def is_all_none(iterable_or_element):
if not isinstance(iterable_or_element, (list, tuple)):
iterable = [iterable_or_element]
else:
iterable = iterable_or_element
# We cannot use Python's `any` because the iterable may return Tensors.
for element in iterable:
if element is not None:
return False
return True
| {
"content_hash": "148d489ac84d5c7955e9000549b0116e",
"timestamp": "",
"source": "github",
"line_count": 547,
"max_line_length": 80,
"avg_line_length": 31.427787934186473,
"alnum_prop": 0.6157873305799546,
"repo_name": "xodus7/tensorflow",
"id": "2e56fa2dc5474678ba3ef765bc148f09c4665ec0",
"size": "17880",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/utils/generic_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "340946"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48861698"
},
{
"name": "CMake",
"bytes": "195699"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1240309"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834061"
},
{
"name": "Jupyter Notebook",
"bytes": "2604756"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40952138"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "459258"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import pickle
import weakref
from uuid import uuid4, UUID
import numpy as np
import pyarrow as pa
import pytest
class IntegerType(pa.PyExtensionType):
def __init__(self):
pa.PyExtensionType.__init__(self, pa.int64())
def __reduce__(self):
return IntegerType, ()
class UuidScalarType(pa.ExtensionScalar):
def as_py(self):
return None if self.value is None else UUID(bytes=self.value.as_py())
class UuidType(pa.PyExtensionType):
def __init__(self):
pa.PyExtensionType.__init__(self, pa.binary(16))
def __reduce__(self):
return UuidType, ()
def __arrow_ext_scalar_class__(self):
return UuidScalarType
class UuidType2(pa.PyExtensionType):
def __init__(self):
pa.PyExtensionType.__init__(self, pa.binary(16))
def __reduce__(self):
return UuidType, ()
class ParamExtType(pa.PyExtensionType):
def __init__(self, width):
self._width = width
pa.PyExtensionType.__init__(self, pa.binary(width))
@property
def width(self):
return self._width
def __reduce__(self):
return ParamExtType, (self.width,)
class MyStructType(pa.PyExtensionType):
storage_type = pa.struct([('left', pa.int64()),
('right', pa.int64())])
def __init__(self):
pa.PyExtensionType.__init__(self, self.storage_type)
def __reduce__(self):
return MyStructType, ()
class MyListType(pa.PyExtensionType):
def __init__(self, storage_type):
pa.PyExtensionType.__init__(self, storage_type)
def __reduce__(self):
return MyListType, (self.storage_type,)
class AnnotatedType(pa.PyExtensionType):
"""
Generic extension type that can store any storage type.
"""
def __init__(self, storage_type, annotation):
self.annotation = annotation
super().__init__(storage_type)
def __reduce__(self):
return AnnotatedType, (self.storage_type, self.annotation)
def ipc_write_batch(batch):
stream = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
writer.close()
return stream.getvalue()
def ipc_read_batch(buf):
reader = pa.RecordBatchStreamReader(buf)
return reader.read_next_batch()
def test_ext_type_basics():
ty = UuidType()
assert ty.extension_name == "arrow.py_extension_type"
def test_ext_type_str():
ty = IntegerType()
expected = "extension<arrow.py_extension_type<IntegerType>>"
assert str(ty) == expected
assert pa.DataType.__str__(ty) == expected
def test_ext_type_repr():
ty = IntegerType()
assert repr(ty) == "IntegerType(DataType(int64))"
def test_ext_type__lifetime():
ty = UuidType()
wr = weakref.ref(ty)
del ty
assert wr() is None
def test_ext_type__storage_type():
ty = UuidType()
assert ty.storage_type == pa.binary(16)
assert ty.__class__ is UuidType
ty = ParamExtType(5)
assert ty.storage_type == pa.binary(5)
assert ty.__class__ is ParamExtType
def test_ext_type_as_py():
ty = UuidType()
expected = uuid4()
scalar = pa.ExtensionScalar.from_storage(ty, expected.bytes)
assert scalar.as_py() == expected
# test array
uuids = [uuid4() for _ in range(3)]
storage = pa.array([uuid.bytes for uuid in uuids], type=pa.binary(16))
arr = pa.ExtensionArray.from_storage(ty, storage)
# Works for __get_item__
for i, expected in enumerate(uuids):
assert arr[i].as_py() == expected
# Works for __iter__
for result, expected in zip(arr, uuids):
assert result.as_py() == expected
# test chunked array
data = [
pa.ExtensionArray.from_storage(ty, storage),
pa.ExtensionArray.from_storage(ty, storage)
]
carr = pa.chunked_array(data)
for i, expected in enumerate(uuids + uuids):
assert carr[i].as_py() == expected
for result, expected in zip(carr, uuids + uuids):
assert result.as_py() == expected
def test_uuid_type_pickle():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
ty = UuidType()
ser = pickle.dumps(ty, protocol=proto)
del ty
ty = pickle.loads(ser)
wr = weakref.ref(ty)
assert ty.extension_name == "arrow.py_extension_type"
del ty
assert wr() is None
def test_ext_type_equality():
a = ParamExtType(5)
b = ParamExtType(6)
c = ParamExtType(6)
assert a != b
assert b == c
d = UuidType()
e = UuidType()
assert a != d
assert d == e
def test_ext_array_basics():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
arr.validate()
assert arr.type is ty
assert arr.storage.equals(storage)
def test_ext_array_lifetime():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
refs = [weakref.ref(ty), weakref.ref(arr), weakref.ref(storage)]
del ty, storage, arr
for ref in refs:
assert ref() is None
def test_ext_array_to_pylist():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar", None], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
assert arr.to_pylist() == [b"foo", b"bar", None]
def test_ext_array_errors():
ty = ParamExtType(4)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
with pytest.raises(TypeError, match="Incompatible storage type"):
pa.ExtensionArray.from_storage(ty, storage)
def test_ext_array_equality():
storage1 = pa.array([b"0123456789abcdef"], type=pa.binary(16))
storage2 = pa.array([b"0123456789abcdef"], type=pa.binary(16))
storage3 = pa.array([], type=pa.binary(16))
ty1 = UuidType()
ty2 = ParamExtType(16)
a = pa.ExtensionArray.from_storage(ty1, storage1)
b = pa.ExtensionArray.from_storage(ty1, storage2)
assert a.equals(b)
c = pa.ExtensionArray.from_storage(ty1, storage3)
assert not a.equals(c)
d = pa.ExtensionArray.from_storage(ty2, storage1)
assert not a.equals(d)
e = pa.ExtensionArray.from_storage(ty2, storage2)
assert d.equals(e)
f = pa.ExtensionArray.from_storage(ty2, storage3)
assert not d.equals(f)
def test_ext_array_wrap_array():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar", None], type=pa.binary(3))
arr = ty.wrap_array(storage)
arr.validate(full=True)
assert isinstance(arr, pa.ExtensionArray)
assert arr.type == ty
assert arr.storage == storage
storage = pa.chunked_array([[b"abc", b"def"], [b"ghi"]],
type=pa.binary(3))
arr = ty.wrap_array(storage)
arr.validate(full=True)
assert isinstance(arr, pa.ChunkedArray)
assert arr.type == ty
assert arr.chunk(0).storage == storage.chunk(0)
assert arr.chunk(1).storage == storage.chunk(1)
# Wrong storage type
storage = pa.array([b"foo", b"bar", None])
with pytest.raises(TypeError, match="Incompatible storage type"):
ty.wrap_array(storage)
# Not an array or chunked array
with pytest.raises(TypeError, match="Expected array or chunked array"):
ty.wrap_array(None)
def test_ext_scalar_from_array():
data = [b"0123456789abcdef", b"0123456789abcdef",
b"zyxwvutsrqponmlk", None]
storage = pa.array(data, type=pa.binary(16))
ty1 = UuidType()
ty2 = ParamExtType(16)
ty3 = UuidType2()
a = pa.ExtensionArray.from_storage(ty1, storage)
b = pa.ExtensionArray.from_storage(ty2, storage)
c = pa.ExtensionArray.from_storage(ty3, storage)
scalars_a = list(a)
assert len(scalars_a) == 4
assert ty1.__arrow_ext_scalar_class__() == UuidScalarType
assert type(a[0]) == UuidScalarType
assert type(scalars_a[0]) == UuidScalarType
for s, val in zip(scalars_a, data):
assert isinstance(s, pa.ExtensionScalar)
assert s.is_valid == (val is not None)
assert s.type == ty1
if val is not None:
assert s.value == pa.scalar(val, storage.type)
assert s.as_py() == UUID(bytes=val)
else:
assert s.value is None
scalars_b = list(b)
assert len(scalars_b) == 4
for sa, sb in zip(scalars_a, scalars_b):
assert isinstance(sb, pa.ExtensionScalar)
assert sa.is_valid == sb.is_valid
if sa.as_py() is None:
assert sa.as_py() == sb.as_py()
else:
assert sa.as_py().bytes == sb.as_py()
assert sa != sb
scalars_c = list(c)
assert len(scalars_c) == 4
for s, val in zip(scalars_c, data):
assert isinstance(s, pa.ExtensionScalar)
assert s.is_valid == (val is not None)
assert s.type == ty3
if val is not None:
assert s.value == pa.scalar(val, storage.type)
assert s.as_py() == val
else:
assert s.value is None
assert a.to_pylist() == [UUID(bytes=x) if x else None for x in data]
def test_ext_scalar_from_storage():
ty = UuidType()
s = pa.ExtensionScalar.from_storage(ty, None)
assert isinstance(s, pa.ExtensionScalar)
assert s.type == ty
assert s.is_valid is False
assert s.value is None
s = pa.ExtensionScalar.from_storage(ty, b"0123456789abcdef")
assert isinstance(s, pa.ExtensionScalar)
assert s.type == ty
assert s.is_valid is True
assert s.value == pa.scalar(b"0123456789abcdef", ty.storage_type)
s = pa.ExtensionScalar.from_storage(ty, pa.scalar(None, ty.storage_type))
assert isinstance(s, pa.ExtensionScalar)
assert s.type == ty
assert s.is_valid is False
assert s.value is None
s = pa.ExtensionScalar.from_storage(
ty, pa.scalar(b"0123456789abcdef", ty.storage_type))
assert isinstance(s, pa.ExtensionScalar)
assert s.type == ty
assert s.is_valid is True
assert s.value == pa.scalar(b"0123456789abcdef", ty.storage_type)
def test_ext_array_pickling():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
ser = pickle.dumps(arr, protocol=proto)
del ty, storage, arr
arr = pickle.loads(ser)
arr.validate()
assert isinstance(arr, pa.ExtensionArray)
assert arr.type == ParamExtType(3)
assert arr.type.storage_type == pa.binary(3)
assert arr.storage.type == pa.binary(3)
assert arr.storage.to_pylist() == [b"foo", b"bar"]
def test_ext_array_conversion_to_numpy():
storage1 = pa.array([1, 2, 3], type=pa.int64())
storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3))
ty1 = IntegerType()
ty2 = ParamExtType(3)
arr1 = pa.ExtensionArray.from_storage(ty1, storage1)
arr2 = pa.ExtensionArray.from_storage(ty2, storage2)
result = arr1.to_numpy()
expected = np.array([1, 2, 3], dtype="int64")
np.testing.assert_array_equal(result, expected)
with pytest.raises(ValueError, match="zero_copy_only was True"):
arr2.to_numpy()
result = arr2.to_numpy(zero_copy_only=False)
expected = np.array([b"123", b"456", b"789"])
np.testing.assert_array_equal(result, expected)
@pytest.mark.pandas
def test_ext_array_conversion_to_pandas():
import pandas as pd
storage1 = pa.array([1, 2, 3], type=pa.int64())
storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3))
ty1 = IntegerType()
ty2 = ParamExtType(3)
arr1 = pa.ExtensionArray.from_storage(ty1, storage1)
arr2 = pa.ExtensionArray.from_storage(ty2, storage2)
result = arr1.to_pandas()
expected = pd.Series([1, 2, 3], dtype="int64")
pd.testing.assert_series_equal(result, expected)
result = arr2.to_pandas()
expected = pd.Series([b"123", b"456", b"789"], dtype=object)
pd.testing.assert_series_equal(result, expected)
@pytest.fixture
def struct_w_ext_data():
storage1 = pa.array([1, 2, 3], type=pa.int64())
storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3))
ty1 = IntegerType()
ty2 = ParamExtType(3)
arr1 = pa.ExtensionArray.from_storage(ty1, storage1)
arr2 = pa.ExtensionArray.from_storage(ty2, storage2)
sarr1 = pa.StructArray.from_arrays([arr1], ["f0"])
sarr2 = pa.StructArray.from_arrays([arr2], ["f1"])
return [sarr1, sarr2]
def test_struct_w_ext_array_to_numpy(struct_w_ext_data):
# ARROW-15291
# Check that we don't segfault when trying to build
# a numpy array from a StructArray with a field being
# an ExtensionArray
result = struct_w_ext_data[0].to_numpy(zero_copy_only=False)
expected = np.array([{'f0': 1}, {'f0': 2},
{'f0': 3}], dtype=object)
np.testing.assert_array_equal(result, expected)
result = struct_w_ext_data[1].to_numpy(zero_copy_only=False)
expected = np.array([{'f1': b'123'}, {'f1': b'456'},
{'f1': b'789'}], dtype=object)
np.testing.assert_array_equal(result, expected)
@pytest.mark.pandas
def test_struct_w_ext_array_to_pandas(struct_w_ext_data):
# ARROW-15291
# Check that we don't segfault when trying to build
# a Pandas dataframe from a StructArray with a field
# being an ExtensionArray
import pandas as pd
result = struct_w_ext_data[0].to_pandas()
expected = pd.Series([{'f0': 1}, {'f0': 2},
{'f0': 3}], dtype=object)
pd.testing.assert_series_equal(result, expected)
result = struct_w_ext_data[1].to_pandas()
expected = pd.Series([{'f1': b'123'}, {'f1': b'456'},
{'f1': b'789'}], dtype=object)
pd.testing.assert_series_equal(result, expected)
def test_cast_kernel_on_extension_arrays():
# test array casting
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(IntegerType(), storage)
# test that no allocation happens during identity cast
allocated_before_cast = pa.total_allocated_bytes()
casted = arr.cast(pa.int64())
assert pa.total_allocated_bytes() == allocated_before_cast
cases = [
(pa.int64(), pa.Int64Array),
(pa.int32(), pa.Int32Array),
(pa.int16(), pa.Int16Array),
(pa.uint64(), pa.UInt64Array),
(pa.uint32(), pa.UInt32Array),
(pa.uint16(), pa.UInt16Array)
]
for typ, klass in cases:
casted = arr.cast(typ)
assert casted.type == typ
assert isinstance(casted, klass)
# test chunked array casting
arr = pa.chunked_array([arr, arr])
casted = arr.cast(pa.int16())
assert casted.type == pa.int16()
assert isinstance(casted, pa.ChunkedArray)
def test_casting_to_extension_type_raises():
arr = pa.array([1, 2, 3, 4], pa.int64())
with pytest.raises(pa.ArrowNotImplementedError):
arr.cast(IntegerType())
def test_null_storage_type():
ext_type = AnnotatedType(pa.null(), {"key": "value"})
storage = pa.array([None] * 10, pa.null())
arr = pa.ExtensionArray.from_storage(ext_type, storage)
assert arr.null_count == 10
arr.validate(full=True)
def example_batch():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
return pa.RecordBatch.from_arrays([arr], ["exts"])
def check_example_batch(batch):
arr = batch.column(0)
assert isinstance(arr, pa.ExtensionArray)
assert arr.type.storage_type == pa.binary(3)
assert arr.storage.to_pylist() == [b"foo", b"bar"]
return arr
def test_ipc():
batch = example_batch()
buf = ipc_write_batch(batch)
del batch
batch = ipc_read_batch(buf)
arr = check_example_batch(batch)
assert arr.type == ParamExtType(3)
def test_ipc_unknown_type():
batch = example_batch()
buf = ipc_write_batch(batch)
del batch
orig_type = ParamExtType
try:
# Simulate the original Python type being unavailable.
# Deserialization should not fail but return a placeholder type.
del globals()['ParamExtType']
batch = ipc_read_batch(buf)
arr = check_example_batch(batch)
assert isinstance(arr.type, pa.UnknownExtensionType)
# Can be serialized again
buf2 = ipc_write_batch(batch)
del batch, arr
batch = ipc_read_batch(buf2)
arr = check_example_batch(batch)
assert isinstance(arr.type, pa.UnknownExtensionType)
finally:
globals()['ParamExtType'] = orig_type
# Deserialize again with the type restored
batch = ipc_read_batch(buf2)
arr = check_example_batch(batch)
assert arr.type == ParamExtType(3)
class PeriodArray(pa.ExtensionArray):
pass
class PeriodType(pa.ExtensionType):
def __init__(self, freq):
# attributes need to be set first before calling
# super init (as that calls serialize)
self._freq = freq
pa.ExtensionType.__init__(self, pa.int64(), 'test.period')
@property
def freq(self):
return self._freq
def __arrow_ext_serialize__(self):
return "freq={}".format(self.freq).encode()
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized):
serialized = serialized.decode()
assert serialized.startswith("freq=")
freq = serialized.split('=')[1]
return PeriodType(freq)
def __eq__(self, other):
if isinstance(other, pa.BaseExtensionType):
return (type(self) == type(other) and
self.freq == other.freq)
else:
return NotImplemented
class PeriodTypeWithClass(PeriodType):
def __init__(self, freq):
PeriodType.__init__(self, freq)
def __arrow_ext_class__(self):
return PeriodArray
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized):
freq = PeriodType.__arrow_ext_deserialize__(
storage_type, serialized).freq
return PeriodTypeWithClass(freq)
@pytest.fixture(params=[PeriodType('D'), PeriodTypeWithClass('D')])
def registered_period_type(request):
# setup
period_type = request.param
period_class = period_type.__arrow_ext_class__()
pa.register_extension_type(period_type)
yield period_type, period_class
# teardown
try:
pa.unregister_extension_type('test.period')
except KeyError:
pass
def test_generic_ext_type():
period_type = PeriodType('D')
assert period_type.extension_name == "test.period"
assert period_type.storage_type == pa.int64()
# default ext_class expected.
assert period_type.__arrow_ext_class__() == pa.ExtensionArray
def test_generic_ext_type_ipc(registered_period_type):
period_type, period_class = registered_period_type
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(period_type, storage)
batch = pa.RecordBatch.from_arrays([arr], ["ext"])
# check the built array has exactly the expected clss
assert type(arr) == period_class
buf = ipc_write_batch(batch)
del batch
batch = ipc_read_batch(buf)
result = batch.column(0)
# check the deserialized array class is the expected one
assert type(result) == period_class
assert result.type.extension_name == "test.period"
assert arr.storage.to_pylist() == [1, 2, 3, 4]
# we get back an actual PeriodType
assert isinstance(result.type, PeriodType)
assert result.type.freq == 'D'
assert result.type == period_type
# using different parametrization as how it was registered
period_type_H = period_type.__class__('H')
assert period_type_H.extension_name == "test.period"
assert period_type_H.freq == 'H'
arr = pa.ExtensionArray.from_storage(period_type_H, storage)
batch = pa.RecordBatch.from_arrays([arr], ["ext"])
buf = ipc_write_batch(batch)
del batch
batch = ipc_read_batch(buf)
result = batch.column(0)
assert isinstance(result.type, PeriodType)
assert result.type.freq == 'H'
assert type(result) == period_class
def test_generic_ext_type_ipc_unknown(registered_period_type):
period_type, _ = registered_period_type
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(period_type, storage)
batch = pa.RecordBatch.from_arrays([arr], ["ext"])
buf = ipc_write_batch(batch)
del batch
# unregister type before loading again => reading unknown extension type
# as plain array (but metadata in schema's field are preserved)
pa.unregister_extension_type('test.period')
batch = ipc_read_batch(buf)
result = batch.column(0)
assert isinstance(result, pa.Int64Array)
ext_field = batch.schema.field('ext')
assert ext_field.metadata == {
b'ARROW:extension:metadata': b'freq=D',
b'ARROW:extension:name': b'test.period'
}
def test_generic_ext_type_equality():
period_type = PeriodType('D')
assert period_type.extension_name == "test.period"
period_type2 = PeriodType('D')
period_type3 = PeriodType('H')
assert period_type == period_type2
assert not period_type == period_type3
def test_generic_ext_type_register(registered_period_type):
# test that trying to register other type does not segfault
with pytest.raises(TypeError):
pa.register_extension_type(pa.string())
# register second time raises KeyError
period_type = PeriodType('D')
with pytest.raises(KeyError):
pa.register_extension_type(period_type)
@pytest.mark.parquet
def test_parquet_period(tmpdir, registered_period_type):
# Parquet support for primitive extension types
period_type, period_class = registered_period_type
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(period_type, storage)
table = pa.table([arr], names=["ext"])
import pyarrow.parquet as pq
filename = tmpdir / 'period_extension_type.parquet'
pq.write_table(table, filename)
# Stored in parquet as storage type but with extension metadata saved
# in the serialized arrow schema
meta = pq.read_metadata(filename)
assert meta.schema.column(0).physical_type == "INT64"
assert b"ARROW:schema" in meta.metadata
import base64
decoded_schema = base64.b64decode(meta.metadata[b"ARROW:schema"])
schema = pa.ipc.read_schema(pa.BufferReader(decoded_schema))
# Since the type could be reconstructed, the extension type metadata is
# absent.
assert schema.field("ext").metadata == {}
# When reading in, properly create extension type if it is registered
result = pq.read_table(filename)
assert result.schema.field("ext").type == period_type
assert result.schema.field("ext").metadata == {}
# Get the exact array class defined by the registered type.
result_array = result.column("ext").chunk(0)
assert type(result_array) is period_class
# When the type is not registered, read in as storage type
pa.unregister_extension_type(period_type.extension_name)
result = pq.read_table(filename)
assert result.schema.field("ext").type == pa.int64()
# The extension metadata is present for roundtripping.
assert result.schema.field("ext").metadata == {
b'ARROW:extension:metadata': b'freq=D',
b'ARROW:extension:name': b'test.period'
}
@pytest.mark.parquet
def test_parquet_extension_with_nested_storage(tmpdir):
# Parquet support for extension types with nested storage type
import pyarrow.parquet as pq
struct_array = pa.StructArray.from_arrays(
[pa.array([0, 1], type="int64"), pa.array([4, 5], type="int64")],
names=["left", "right"])
list_array = pa.array([[1, 2, 3], [4, 5]], type=pa.list_(pa.int32()))
mystruct_array = pa.ExtensionArray.from_storage(MyStructType(),
struct_array)
mylist_array = pa.ExtensionArray.from_storage(
MyListType(list_array.type), list_array)
orig_table = pa.table({'structs': mystruct_array,
'lists': mylist_array})
filename = tmpdir / 'nested_extension_storage.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column('structs').type == mystruct_array.type
assert table.column('lists').type == mylist_array.type
assert table == orig_table
@pytest.mark.parquet
def test_parquet_nested_extension(tmpdir):
# Parquet support for extension types nested in struct or list
import pyarrow.parquet as pq
ext_type = IntegerType()
storage = pa.array([4, 5, 6, 7], type=pa.int64())
ext_array = pa.ExtensionArray.from_storage(ext_type, storage)
# Struct of extensions
struct_array = pa.StructArray.from_arrays(
[storage, ext_array],
names=['ints', 'exts'])
orig_table = pa.table({'structs': struct_array})
filename = tmpdir / 'struct_of_ext.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column(0).type == struct_array.type
assert table == orig_table
# List of extensions
list_array = pa.ListArray.from_arrays([0, 1, None, 3], ext_array)
orig_table = pa.table({'lists': list_array})
filename = tmpdir / 'list_of_ext.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column(0).type == list_array.type
assert table == orig_table
# Large list of extensions
list_array = pa.LargeListArray.from_arrays([0, 1, None, 3], ext_array)
orig_table = pa.table({'lists': list_array})
filename = tmpdir / 'list_of_ext.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column(0).type == list_array.type
assert table == orig_table
@pytest.mark.parquet
def test_parquet_extension_nested_in_extension(tmpdir):
# Parquet support for extension<list<extension>>
import pyarrow.parquet as pq
inner_ext_type = IntegerType()
inner_storage = pa.array([4, 5, 6, 7], type=pa.int64())
inner_ext_array = pa.ExtensionArray.from_storage(inner_ext_type,
inner_storage)
list_array = pa.ListArray.from_arrays([0, 1, None, 3], inner_ext_array)
mylist_array = pa.ExtensionArray.from_storage(
MyListType(list_array.type), list_array)
orig_table = pa.table({'lists': mylist_array})
filename = tmpdir / 'ext_of_list_of_ext.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column(0).type == mylist_array.type
assert table == orig_table
def test_to_numpy():
period_type = PeriodType('D')
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(period_type, storage)
expected = storage.to_numpy()
result = arr.to_numpy()
np.testing.assert_array_equal(result, expected)
result = np.asarray(arr)
np.testing.assert_array_equal(result, expected)
# chunked array
a1 = pa.chunked_array([arr, arr])
a2 = pa.chunked_array([arr, arr], type=period_type)
expected = np.hstack([expected, expected])
for charr in [a1, a2]:
assert charr.type == period_type
for result in [np.asarray(charr), charr.to_numpy()]:
assert result.dtype == np.int64
np.testing.assert_array_equal(result, expected)
# zero chunks
charr = pa.chunked_array([], type=period_type)
assert charr.type == period_type
for result in [np.asarray(charr), charr.to_numpy()]:
assert result.dtype == np.int64
np.testing.assert_array_equal(result, np.array([], dtype='int64'))
def test_empty_take():
# https://issues.apache.org/jira/browse/ARROW-13474
ext_type = IntegerType()
storage = pa.array([], type=pa.int64())
empty_arr = pa.ExtensionArray.from_storage(ext_type, storage)
result = empty_arr.filter(pa.array([], pa.bool_()))
assert len(result) == 0
assert result.equals(empty_arr)
result = empty_arr.take(pa.array([], pa.int32()))
assert len(result) == 0
assert result.equals(empty_arr)
def test_array_constructor():
ext_type = IntegerType()
storage = pa.array([1, 2, 3], type=pa.int64())
expected = pa.ExtensionArray.from_storage(ext_type, storage)
result = pa.array([1, 2, 3], type=IntegerType())
assert result.equals(expected)
result = pa.array(np.array([1, 2, 3]), type=IntegerType())
assert result.equals(expected)
result = pa.array(np.array([1.0, 2.0, 3.0]), type=IntegerType())
assert result.equals(expected)
@pytest.mark.pandas
def test_array_constructor_from_pandas():
import pandas as pd
ext_type = IntegerType()
storage = pa.array([1, 2, 3], type=pa.int64())
expected = pa.ExtensionArray.from_storage(ext_type, storage)
result = pa.array(pd.Series([1, 2, 3]), type=IntegerType())
assert result.equals(expected)
result = pa.array(
pd.Series([1, 2, 3], dtype="category"), type=IntegerType()
)
assert result.equals(expected)
| {
"content_hash": "b9045d26b9429465c75aec887186239b",
"timestamp": "",
"source": "github",
"line_count": 942,
"max_line_length": 77,
"avg_line_length": 31.03184713375796,
"alnum_prop": 0.6408388067870826,
"repo_name": "icexelloss/arrow",
"id": "96e995c64181971fb8d61e317a58f99a3539a6c2",
"size": "30018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyarrow/tests/test_extension_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "3709"
},
{
"name": "Batchfile",
"bytes": "31136"
},
{
"name": "C",
"bytes": "1303179"
},
{
"name": "C#",
"bytes": "1029129"
},
{
"name": "C++",
"bytes": "24357294"
},
{
"name": "CMake",
"bytes": "707501"
},
{
"name": "Cython",
"bytes": "1546990"
},
{
"name": "Dockerfile",
"bytes": "144408"
},
{
"name": "Emacs Lisp",
"bytes": "1064"
},
{
"name": "FreeMarker",
"bytes": "2312"
},
{
"name": "Go",
"bytes": "4254915"
},
{
"name": "HTML",
"bytes": "3430"
},
{
"name": "Java",
"bytes": "6990057"
},
{
"name": "JavaScript",
"bytes": "127157"
},
{
"name": "Jinja",
"bytes": "19371"
},
{
"name": "Lua",
"bytes": "8771"
},
{
"name": "MATLAB",
"bytes": "40399"
},
{
"name": "Makefile",
"bytes": "31661"
},
{
"name": "Meson",
"bytes": "69508"
},
{
"name": "Objective-C++",
"bytes": "11472"
},
{
"name": "Perl",
"bytes": "3803"
},
{
"name": "Python",
"bytes": "3019333"
},
{
"name": "R",
"bytes": "1508383"
},
{
"name": "Ruby",
"bytes": "1596677"
},
{
"name": "Shell",
"bytes": "385605"
},
{
"name": "Thrift",
"bytes": "34246"
},
{
"name": "TypeScript",
"bytes": "1075563"
},
{
"name": "Vala",
"bytes": "24798"
}
],
"symlink_target": ""
} |
import uuid
from lxml import etree
import mock
import paramiko
import six
from jacket.storage import exception
from jacket.storage import ssh_utils
from jacket.storage import test
from jacket.tests.storage.unit.volume.drivers.netapp.dataontap.client import (
fakes as fake_client)
from jacket.tests.storage.unit.volume.drivers.netapp.dataontap import fakes as fake
from jacket.storage.volume.drivers.netapp.dataontap.client import api as netapp_api
from jacket.storage.volume.drivers.netapp.dataontap.client import client_cmode
from jacket.storage.volume.drivers.netapp import utils as netapp_utils
CONNECTION_INFO = {'hostname': 'hostname',
'transport_type': 'https',
'port': 443,
'username': 'admin',
'password': 'passw0rd',
'vserver': 'fake_vserver'}
class NetAppCmodeClientTestCase(test.TestCase):
def setUp(self):
super(NetAppCmodeClientTestCase, self).setUp()
self.mock_object(client_cmode.Client, '_init_ssh_client')
with mock.patch.object(client_cmode.Client,
'get_ontapi_version',
return_value=(1, 20)):
self.client = client_cmode.Client(**CONNECTION_INFO)
self.client.ssh_client = mock.MagicMock()
self.client.connection = mock.MagicMock()
self.connection = self.client.connection
self.vserver = CONNECTION_INFO['vserver']
self.fake_volume = six.text_type(uuid.uuid4())
self.fake_lun = six.text_type(uuid.uuid4())
self.mock_send_request = self.mock_object(self.client, 'send_request')
def tearDown(self):
super(NetAppCmodeClientTestCase, self).tearDown()
def _mock_api_error(self, code='fake'):
return mock.Mock(side_effect=netapp_api.NaApiError(code=code))
def test_has_records(self):
result = self.client._has_records(netapp_api.NaElement(
fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE))
self.assertTrue(result)
def test_has_records_not_found(self):
result = self.client._has_records(
netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE))
self.assertFalse(result)
def test_get_iscsi_target_details_no_targets(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list></attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
target_list = self.client.get_iscsi_target_details()
self.assertEqual([], target_list)
def test_get_iscsi_target_details(self):
expected_target = {
"address": "127.0.0.1",
"port": "1337",
"interface-enabled": "true",
"tpgroup-tag": "7777",
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<iscsi-interface-list-entry-info>
<ip-address>%(address)s</ip-address>
<ip-port>%(port)s</ip-port>
<is-interface-enabled>%(interface-enabled)s</is-interface-enabled>
<tpgroup-tag>%(tpgroup-tag)s</tpgroup-tag>
</iscsi-interface-list-entry-info>
</attributes-list>
</results>""" % expected_target))
self.connection.invoke_successfully.return_value = response
target_list = self.client.get_iscsi_target_details()
self.assertEqual([expected_target], target_list)
def test_get_iscsi_service_details_with_no_iscsi_service(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
</results>"""))
self.connection.invoke_successfully.return_value = response
iqn = self.client.get_iscsi_service_details()
self.assertIsNone(iqn)
def test_get_iscsi_service_details(self):
expected_iqn = 'iqn.1998-01.org.openstack.iscsi:name1'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<iscsi-service-info>
<node-name>%s</node-name>
</iscsi-service-info>
</attributes-list>
</results>""" % expected_iqn))
self.connection.invoke_successfully.return_value = response
iqn = self.client.get_iscsi_service_details()
self.assertEqual(expected_iqn, iqn)
def test_get_lun_list(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info>
</lun-info>
<lun-info>
</lun-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
luns = self.client.get_lun_list()
self.assertEqual(2, len(luns))
def test_get_lun_list_with_multiple_pages(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info> </lun-info>
<lun-info> </lun-info>
</attributes-list>
<next-tag>fake-next</next-tag>
</results>"""))
response_2 = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info> </lun-info>
<lun-info> </lun-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.side_effect = [response,
response_2]
luns = self.client.get_lun_list()
self.assertEqual(4, len(luns))
def test_get_lun_map_no_luns_mapped(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
<attributes-list></attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
lun_map = self.client.get_lun_map(path)
self.assertEqual([], lun_map)
def test_get_lun_map(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
expected_lun_map = {
"initiator-group": "igroup",
"lun-id": "1337",
"vserver": "vserver",
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<lun-map-info>
<lun-id>%(lun-id)s</lun-id>
<initiator-group>%(initiator-group)s</initiator-group>
<vserver>%(vserver)s</vserver>
</lun-map-info>
</attributes-list>
</results>""" % expected_lun_map))
self.connection.invoke_successfully.return_value = response
lun_map = self.client.get_lun_map(path)
self.assertEqual([expected_lun_map], lun_map)
def test_get_lun_map_multiple_pages(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
expected_lun_map = {
"initiator-group": "igroup",
"lun-id": "1337",
"vserver": "vserver",
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<lun-map-info>
<lun-id>%(lun-id)s</lun-id>
<initiator-group>%(initiator-group)s</initiator-group>
<vserver>%(vserver)s</vserver>
</lun-map-info>
</attributes-list>
<next-tag>blah</next-tag>
</results>""" % expected_lun_map))
response_2 = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<lun-map-info>
<lun-id>%(lun-id)s</lun-id>
<initiator-group>%(initiator-group)s</initiator-group>
<vserver>%(vserver)s</vserver>
</lun-map-info>
</attributes-list>
</results>""" % expected_lun_map))
self.connection.invoke_successfully.side_effect = [response,
response_2]
lun_map = self.client.get_lun_map(path)
self.assertEqual([expected_lun_map, expected_lun_map], lun_map)
def test_get_igroup_by_initiator_none_found(self):
initiator = 'initiator'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
<attributes-list></attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
igroup = self.client.get_igroup_by_initiators([initiator])
self.assertEqual([], igroup)
def test_get_igroup_by_initiators(self):
initiators = ['11:22:33:44:55:66:77:88']
expected_igroup = {
'initiator-group-os-type': 'default',
'initiator-group-type': 'fcp',
'initiator-group-name': 'openstack-igroup1',
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<attributes-list>
<initiator-group-info>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-os-type>default</initiator-group-os-type>
<initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
</initiator-group-uuid>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiators>
<initiator-info>
<initiator-name>11:22:33:44:55:66:77:88</initiator-name>
</initiator-info>
</initiators>
<vserver>storage-iscsi</vserver>
</initiator-group-info>
</attributes-list>
<num-records>1</num-records>
</results>""" % expected_igroup))
self.connection.invoke_successfully.return_value = response
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(expected_igroup)])
self.assertSetEqual(igroups, expected)
def test_get_igroup_by_initiators_multiple(self):
initiators = ['11:22:33:44:55:66:77:88', '88:77:66:55:44:33:22:11']
expected_igroup = {
'initiator-group-os-type': 'default',
'initiator-group-type': 'fcp',
'initiator-group-name': 'openstack-igroup1',
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<attributes-list>
<initiator-group-info>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-os-type>default</initiator-group-os-type>
<initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
</initiator-group-uuid>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiators>
<initiator-info>
<initiator-name>11:22:33:44:55:66:77:88</initiator-name>
</initiator-info>
<initiator-info>
<initiator-name>88:77:66:55:44:33:22:11</initiator-name>
</initiator-info>
</initiators>
<vserver>storage-iscsi</vserver>
</initiator-group-info>
</attributes-list>
<num-records>1</num-records>
</results>""" % expected_igroup))
self.connection.invoke_successfully.return_value = response
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(expected_igroup)])
self.assertSetEqual(igroups, expected)
def test_get_igroup_by_initiators_multiple_pages(self):
initiator = '11:22:33:44:55:66:77:88'
expected_igroup1 = {
'initiator-group-os-type': 'default',
'initiator-group-type': 'fcp',
'initiator-group-name': 'openstack-igroup1',
}
expected_igroup2 = {
'initiator-group-os-type': 'default',
'initiator-group-type': 'fcp',
'initiator-group-name': 'openstack-igroup2',
}
response_1 = netapp_api.NaElement(
etree.XML("""<results status="passed">
<attributes-list>
<initiator-group-info>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-os-type>default</initiator-group-os-type>
<initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
</initiator-group-uuid>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiators>
<initiator-info>
<initiator-name>11:22:33:44:55:66:77:88</initiator-name>
</initiator-info>
</initiators>
<vserver>storage-iscsi</vserver>
</initiator-group-info>
</attributes-list>
<next-tag>12345</next-tag>
<num-records>1</num-records>
</results>""" % expected_igroup1))
response_2 = netapp_api.NaElement(
etree.XML("""<results status="passed">
<attributes-list>
<initiator-group-info>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-os-type>default</initiator-group-os-type>
<initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
</initiator-group-uuid>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiators>
<initiator-info>
<initiator-name>11:22:33:44:55:66:77:88</initiator-name>
</initiator-info>
</initiators>
<vserver>storage-iscsi</vserver>
</initiator-group-info>
</attributes-list>
<num-records>1</num-records>
</results>""" % expected_igroup2))
self.connection.invoke_successfully.side_effect = [response_1,
response_2]
igroups = self.client.get_igroup_by_initiators([initiator])
# make these lists of dicts comparable using hashable dictionaries
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(expected_igroup1),
netapp_utils.hashabledict(expected_igroup2)])
self.assertSetEqual(igroups, expected)
def test_clone_lun(self):
self.client.clone_lun(
'volume', 'fakeLUN', 'newFakeLUN',
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
self.assertEqual(1, self.connection.invoke_successfully.call_count)
def test_clone_lun_multiple_zapi_calls(self):
"""Test for when lun clone requires more than one zapi call."""
# Max block-ranges per call = 32, max blocks per range = 2^24
# Force 2 calls
bc = 2 ** 24 * 32 * 2
self.client.clone_lun('volume', 'fakeLUN', 'newFakeLUN',
block_count=bc)
self.assertEqual(2, self.connection.invoke_successfully.call_count)
def test_get_lun_by_args(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info>
</lun-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
lun = self.client.get_lun_by_args()
self.assertEqual(1, len(lun))
def test_get_lun_by_args_no_lun_found(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
lun = self.client.get_lun_by_args()
self.assertEqual(0, len(lun))
def test_get_lun_by_args_with_args_specified(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>2</num-records>
<attributes-list>
<lun-info>
</lun-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
lun = self.client.get_lun_by_args(path=path)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
query = actual_request.get_child_by_name('query')
lun_info_args = query.get_child_by_name('lun-info').get_children()
# Assert request is made with correct arguments
self.assertEqual('path', lun_info_args[0].get_name())
self.assertEqual(path, lun_info_args[0].get_content())
self.assertEqual(1, len(lun))
def test_file_assign_qos(self):
api_args = {
'volume': fake.FLEXVOL,
'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME,
'file': fake.NFS_FILE_PATH,
'vserver': self.vserver
}
self.client.file_assign_qos(
fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME, fake.NFS_FILE_PATH)
self.mock_send_request.assert_has_calls([
mock.call('file-assign-qos', api_args, False)])
def test_set_lun_qos_policy_group(self):
api_args = {
'path': fake.LUN_PATH,
'qos-policy-group': fake.QOS_POLICY_GROUP_NAME,
}
self.client.set_lun_qos_policy_group(
fake.LUN_PATH, fake.QOS_POLICY_GROUP_NAME)
self.mock_send_request.assert_has_calls([
mock.call('lun-set-qos-policy-group', api_args)])
def test_provision_qos_policy_group_no_qos_policy_group_info(self):
self.client.provision_qos_policy_group(qos_policy_group_info=None)
self.assertEqual(0, self.connection.qos_policy_group_create.call_count)
def test_provision_qos_policy_group_legacy_qos_policy_group_info(self):
self.client.provision_qos_policy_group(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY)
self.assertEqual(0, self.connection.qos_policy_group_create.call_count)
def test_provision_qos_policy_group_with_qos_spec_create(self):
self.mock_object(self.client,
'qos_policy_group_exists',
mock.Mock(return_value=False))
self.mock_object(self.client, 'qos_policy_group_create')
self.mock_object(self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO)
self.client.qos_policy_group_create.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)])
self.assertFalse(self.client.qos_policy_group_modify.called)
def test_provision_qos_policy_group_with_qos_spec_modify(self):
self.mock_object(self.client,
'qos_policy_group_exists',
mock.Mock(return_value=True))
self.mock_object(self.client, 'qos_policy_group_create')
self.mock_object(self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO)
self.assertFalse(self.client.qos_policy_group_create.called)
self.client.qos_policy_group_modify.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)])
def test_qos_policy_group_exists(self):
self.mock_send_request.return_value = netapp_api.NaElement(
fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE)
result = self.client.qos_policy_group_exists(
fake.QOS_POLICY_GROUP_NAME)
api_args = {
'query': {
'qos-policy-group-info': {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
},
},
'desired-attributes': {
'qos-policy-group-info': {
'policy-group': None,
},
},
}
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-get-iter', api_args, False)])
self.assertTrue(result)
def test_qos_policy_group_exists_not_found(self):
self.mock_send_request.return_value = netapp_api.NaElement(
fake_client.NO_RECORDS_RESPONSE)
result = self.client.qos_policy_group_exists(
fake.QOS_POLICY_GROUP_NAME)
self.assertFalse(result)
def test_qos_policy_group_create(self):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
'max-throughput': fake.MAX_THROUGHPUT,
'vserver': self.vserver,
}
self.client.qos_policy_group_create(
fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-create', api_args, False)])
def test_qos_policy_group_modify(self):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
'max-throughput': fake.MAX_THROUGHPUT,
}
self.client.qos_policy_group_modify(
fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-modify', api_args, False)])
def test_qos_policy_group_delete(self):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME
}
self.client.qos_policy_group_delete(
fake.QOS_POLICY_GROUP_NAME)
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-delete', api_args, False)])
def test_qos_policy_group_rename(self):
new_name = 'new-' + fake.QOS_POLICY_GROUP_NAME
api_args = {
'policy-group-name': fake.QOS_POLICY_GROUP_NAME,
'new-name': new_name,
}
self.client.qos_policy_group_rename(
fake.QOS_POLICY_GROUP_NAME, new_name)
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-rename', api_args, False)])
def test_mark_qos_policy_group_for_deletion_no_qos_policy_group_info(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=None)
self.assertEqual(0, mock_rename.call_count)
self.assertEqual(0, mock_remove.call_count)
def test_mark_qos_policy_group_for_deletion_legacy_qos_policy(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY)
self.assertEqual(0, mock_rename.call_count)
self.assertEqual(1, mock_remove.call_count)
def test_mark_qos_policy_group_for_deletion_w_qos_spec(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
mock_log = self.mock_object(client_cmode.LOG, 'warning')
new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO)
mock_rename.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)])
self.assertEqual(0, mock_log.call_count)
self.assertEqual(1, mock_remove.call_count)
def test_mark_qos_policy_group_for_deletion_exception_path(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_rename.side_effect = netapp_api.NaApiError
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
mock_log = self.mock_object(client_cmode.LOG, 'warning')
new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO)
mock_rename.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)])
self.assertEqual(1, mock_log.call_count)
self.assertEqual(1, mock_remove.call_count)
def test_remove_unused_qos_policy_groups(self):
mock_log = self.mock_object(client_cmode.LOG, 'debug')
api_args = {
'query': {
'qos-policy-group-info': {
'policy-group': 'deleted_cinder_*',
'vserver': self.vserver,
}
},
'max-records': 3500,
'continue-on-failure': 'true',
'return-success-list': 'false',
'return-failure-list': 'false',
}
self.client.remove_unused_qos_policy_groups()
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-delete-iter', api_args, False)])
self.assertEqual(0, mock_log.call_count)
def test_remove_unused_qos_policy_groups_api_error(self):
mock_log = self.mock_object(client_cmode.LOG, 'debug')
api_args = {
'query': {
'qos-policy-group-info': {
'policy-group': 'deleted_cinder_*',
'vserver': self.vserver,
}
},
'max-records': 3500,
'continue-on-failure': 'true',
'return-success-list': 'false',
'return-failure-list': 'false',
}
self.mock_send_request.side_effect = netapp_api.NaApiError
self.client.remove_unused_qos_policy_groups()
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-delete-iter', api_args, False)])
self.assertEqual(1, mock_log.call_count)
@mock.patch('storage.volume.drivers.netapp.utils.resolve_hostname',
return_value='192.168.1.101')
def test_get_if_info_by_ip_not_found(self, mock_resolve_hostname):
fake_ip = '192.168.1.101'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
<attributes-list>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
self.assertRaises(exception.NotFound, self.client.get_if_info_by_ip,
fake_ip)
@mock.patch('storage.volume.drivers.netapp.utils.resolve_hostname',
return_value='192.168.1.101')
def test_get_if_info_by_ip(self, mock_resolve_hostname):
fake_ip = '192.168.1.101'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<net-interface-info>
</net-interface-info>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
results = self.client.get_if_info_by_ip(fake_ip)
self.assertEqual(1, len(results))
def test_get_vol_by_junc_vserver_not_found(self):
fake_vserver = 'fake_vserver'
fake_junc = 'fake_junction_path'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>0</num-records>
<attributes-list>
</attributes-list>
</results>"""))
self.connection.invoke_successfully.return_value = response
self.assertRaises(exception.NotFound,
self.client.get_vol_by_junc_vserver,
fake_vserver, fake_junc)
def test_get_vol_by_junc_vserver(self):
fake_vserver = 'fake_vserver'
fake_junc = 'fake_junction_path'
expected_flex_vol = 'fake_flex_vol'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(flex_vol)s</name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
</results>""" % {'flex_vol': expected_flex_vol}))
self.connection.invoke_successfully.return_value = response
actual_flex_vol = self.client.get_vol_by_junc_vserver(fake_vserver,
fake_junc)
self.assertEqual(expected_flex_vol, actual_flex_vol)
def test_clone_file(self):
expected_flex_vol = "fake_flex_vol"
expected_src_path = "fake_src_path"
expected_dest_path = "fake_dest_path"
self.connection.get_api_version.return_value = (1, 20)
self.client.clone_file(expected_flex_vol, expected_src_path,
expected_dest_path, self.vserver)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
actual_flex_vol = actual_request.get_child_by_name('volume') \
.get_content()
actual_src_path = actual_request \
.get_child_by_name('source-path').get_content()
actual_dest_path = actual_request.get_child_by_name(
'destination-path').get_content()
self.assertEqual(expected_flex_vol, actual_flex_vol)
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
self.assertEqual(actual_request.get_child_by_name(
'destination-exists'), None)
def test_clone_file_when_destination_exists(self):
expected_flex_vol = "fake_flex_vol"
expected_src_path = "fake_src_path"
expected_dest_path = "fake_dest_path"
self.connection.get_api_version.return_value = (1, 20)
self.client.clone_file(expected_flex_vol, expected_src_path,
expected_dest_path, self.vserver,
dest_exists=True)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
actual_flex_vol = actual_request.get_child_by_name('volume') \
.get_content()
actual_src_path = actual_request \
.get_child_by_name('source-path').get_content()
actual_dest_path = actual_request.get_child_by_name(
'destination-path').get_content()
self.assertEqual(expected_flex_vol, actual_flex_vol)
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
self.assertEqual('true',
actual_request.get_child_by_name(
'destination-exists').get_content())
def test_clone_file_when_destination_exists_and_version_less_than_1_20(
self):
expected_flex_vol = "fake_flex_vol"
expected_src_path = "fake_src_path"
expected_dest_path = "fake_dest_path"
self.connection.get_api_version.return_value = (1, 19)
self.client.clone_file(expected_flex_vol, expected_src_path,
expected_dest_path, self.vserver,
dest_exists=True)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
actual_flex_vol = actual_request.get_child_by_name('volume') \
.get_content()
actual_src_path = actual_request \
.get_child_by_name('source-path').get_content()
actual_dest_path = actual_request.get_child_by_name(
'destination-path').get_content()
self.assertEqual(expected_flex_vol, actual_flex_vol)
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
self.assertIsNone(actual_request.get_child_by_name(
'destination-exists'))
def test_get_file_usage(self):
expected_bytes = "2048"
fake_vserver = 'fake_vserver'
fake_path = 'fake_path'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<unique-bytes>%(unique-bytes)s</unique-bytes>
</results>""" % {'unique-bytes': expected_bytes}))
self.connection.invoke_successfully.return_value = response
actual_bytes = self.client.get_file_usage(fake_vserver, fake_path)
self.assertEqual(expected_bytes, actual_bytes)
def test_get_operational_network_interface_addresses(self):
expected_result = ['1.2.3.4', '99.98.97.96']
api_response = netapp_api.NaElement(
fake_client.GET_OPERATIONAL_NETWORK_INTERFACE_ADDRESSES_RESPONSE)
self.mock_send_request.return_value = api_response
address_list = (
self.client.get_operational_network_interface_addresses())
self.assertEqual(expected_result, address_list)
def test_get_flexvol_capacity(self):
expected_total_size = 1000
expected_available_size = 750
fake_flexvol_path = '/fake/vol'
api_response = netapp_api.NaElement(
etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-space-attributes>
<size-available>%(available_size)s</size-available>
<size-total>%(total_size)s</size-total>
</volume-space-attributes>
</volume-attributes>
</attributes-list>
</results>""" % {'available_size': expected_available_size,
'total_size': expected_total_size}))
self.mock_send_request.return_value = api_response
total_size, available_size = (
self.client.get_flexvol_capacity(fake_flexvol_path))
self.assertEqual(expected_total_size, total_size)
self.assertEqual(expected_available_size, available_size)
def test_get_aggregates(self):
api_response = netapp_api.NaElement(
fake_client.AGGR_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client._get_aggregates()
self.client.send_request.assert_has_calls([
mock.call('aggr-get-iter', {}, enable_tunneling=False)])
self.assertListEqual(
[aggr.to_string() for aggr in api_response.get_child_by_name(
'attributes-list').get_children()],
[aggr.to_string() for aggr in result])
def test_get_aggregates_with_filters(self):
api_response = netapp_api.NaElement(
fake_client.AGGR_GET_SPACE_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
desired_attributes = {
'aggr-attributes': {
'aggregate-name': None,
'aggr-space-attributes': {
'size-total': None,
'size-available': None,
}
}
}
result = self.client._get_aggregates(
aggregate_names=fake_client.VOLUME_AGGREGATE_NAMES,
desired_attributes=desired_attributes)
aggr_get_iter_args = {
'query': {
'aggr-attributes': {
'aggregate-name': '|'.join(
fake_client.VOLUME_AGGREGATE_NAMES),
}
},
'desired-attributes': desired_attributes
}
self.client.send_request.assert_has_calls([
mock.call('aggr-get-iter', aggr_get_iter_args,
enable_tunneling=False)])
self.assertListEqual(
[aggr.to_string() for aggr in api_response.get_child_by_name(
'attributes-list').get_children()],
[aggr.to_string() for aggr in result])
def test_get_aggregates_not_found(self):
api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client._get_aggregates()
self.client.send_request.assert_has_calls([
mock.call('aggr-get-iter', {}, enable_tunneling=False)])
self.assertListEqual([], result)
def test_get_node_for_aggregate(self):
api_response = netapp_api.NaElement(
fake_client.AGGR_GET_NODE_RESPONSE).get_child_by_name(
'attributes-list').get_children()
self.mock_object(self.client,
'_get_aggregates',
mock.Mock(return_value=api_response))
result = self.client.get_node_for_aggregate(
fake_client.VOLUME_AGGREGATE_NAME)
desired_attributes = {
'aggr-attributes': {
'aggregate-name': None,
'aggr-ownership-attributes': {
'home-name': None,
},
},
}
self.client._get_aggregates.assert_has_calls([
mock.call(
aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
desired_attributes=desired_attributes)])
self.assertEqual(fake_client.NODE_NAME, result)
def test_get_node_for_aggregate_none_requested(self):
result = self.client.get_node_for_aggregate(None)
self.assertIsNone(result)
def test_get_node_for_aggregate_api_not_found(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error(
netapp_api.EAPINOTFOUND)))
result = self.client.get_node_for_aggregate(
fake_client.VOLUME_AGGREGATE_NAME)
self.assertIsNone(result)
def test_get_node_for_aggregate_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(netapp_api.NaApiError,
self.client.get_node_for_aggregate,
fake_client.VOLUME_AGGREGATE_NAME)
def test_get_node_for_aggregate_not_found(self):
api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_node_for_aggregate(
fake_client.VOLUME_AGGREGATE_NAME)
self.assertIsNone(result)
def test_get_performance_instance_uuids(self):
self.mock_send_request.return_value = netapp_api.NaElement(
fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE)
result = self.client.get_performance_instance_uuids(
'system', fake_client.NODE_NAME)
expected = [fake_client.NODE_NAME + ':kernel:system']
self.assertEqual(expected, result)
perf_object_instance_list_info_iter_args = {
'objectname': 'system',
'query': {
'instance-info': {
'uuid': fake_client.NODE_NAME + ':*',
}
}
}
self.mock_send_request.assert_called_once_with(
'perf-object-instance-list-info-iter',
perf_object_instance_list_info_iter_args, enable_tunneling=False)
def test_get_performance_counters(self):
self.mock_send_request.return_value = netapp_api.NaElement(
fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE)
instance_uuids = [
fake_client.NODE_NAMES[0] + ':kernel:system',
fake_client.NODE_NAMES[1] + ':kernel:system',
]
counter_names = ['avg_processor_busy']
result = self.client.get_performance_counters('system',
instance_uuids,
counter_names)
expected = [
{
'avg_processor_busy': '5674745133134',
'instance-name': 'system',
'instance-uuid': instance_uuids[0],
'node-name': fake_client.NODE_NAMES[0],
'timestamp': '1453412013',
}, {
'avg_processor_busy': '4077649009234',
'instance-name': 'system',
'instance-uuid': instance_uuids[1],
'node-name': fake_client.NODE_NAMES[1],
'timestamp': '1453412013'
},
]
self.assertEqual(expected, result)
perf_object_get_instances_args = {
'objectname': 'system',
'instance-uuids': [
{'instance-uuid': instance_uuid}
for instance_uuid in instance_uuids
],
'counters': [
{'counter': counter} for counter in counter_names
],
}
self.mock_send_request.assert_called_once_with(
'perf-object-get-instances', perf_object_get_instances_args,
enable_tunneling=False)
def test_check_iscsi_initiator_exists_when_no_initiator_exists(self):
self.connection.invoke_successfully = mock.Mock(
side_effect=netapp_api.NaApiError)
initiator = fake_client.INITIATOR_IQN
initiator_exists = self.client.check_iscsi_initiator_exists(initiator)
self.assertFalse(initiator_exists)
def test_check_iscsi_initiator_exists_when_initiator_exists(self):
self.connection.invoke_successfully = mock.Mock()
initiator = fake_client.INITIATOR_IQN
initiator_exists = self.client.check_iscsi_initiator_exists(initiator)
self.assertTrue(initiator_exists)
def test_set_iscsi_chap_authentication_no_previous_initiator(self):
self.connection.invoke_successfully = mock.Mock()
self.mock_object(self.client, 'check_iscsi_initiator_exists',
mock.Mock(return_value=False))
ssh = mock.Mock(paramiko.SSHClient)
sshpool = mock.Mock(ssh_utils.SSHPool)
self.client.ssh_client.ssh_pool = sshpool
self.mock_object(self.client.ssh_client, 'execute_command_with_prompt')
sshpool.item().__enter__ = mock.Mock(return_value=ssh)
sshpool.item().__exit__ = mock.Mock(return_value=False)
self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN,
fake_client.USER_NAME,
fake_client.PASSWORD)
command = ('iscsi security create -vserver fake_vserver '
'-initiator-name iqn.2015-06.com.netapp:fake_iqn '
'-auth-type CHAP -user-name fake_user')
self.client.ssh_client.execute_command_with_prompt.assert_has_calls(
[mock.call(ssh, command, 'Password:', fake_client.PASSWORD)]
)
def test_set_iscsi_chap_authentication_with_preexisting_initiator(self):
self.connection.invoke_successfully = mock.Mock()
self.mock_object(self.client, 'check_iscsi_initiator_exists',
mock.Mock(return_value=True))
ssh = mock.Mock(paramiko.SSHClient)
sshpool = mock.Mock(ssh_utils.SSHPool)
self.client.ssh_client.ssh_pool = sshpool
self.mock_object(self.client.ssh_client, 'execute_command_with_prompt')
sshpool.item().__enter__ = mock.Mock(return_value=ssh)
sshpool.item().__exit__ = mock.Mock(return_value=False)
self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN,
fake_client.USER_NAME,
fake_client.PASSWORD)
command = ('iscsi security modify -vserver fake_vserver '
'-initiator-name iqn.2015-06.com.netapp:fake_iqn '
'-auth-type CHAP -user-name fake_user')
self.client.ssh_client.execute_command_with_prompt.assert_has_calls(
[mock.call(ssh, command, 'Password:', fake_client.PASSWORD)]
)
def test_set_iscsi_chap_authentication_with_ssh_exception(self):
self.connection.invoke_successfully = mock.Mock()
self.mock_object(self.client, 'check_iscsi_initiator_exists',
mock.Mock(return_value=True))
ssh = mock.Mock(paramiko.SSHClient)
sshpool = mock.Mock(ssh_utils.SSHPool)
self.client.ssh_client.ssh_pool = sshpool
sshpool.item().__enter__ = mock.Mock(return_value=ssh)
sshpool.item().__enter__.side_effect = paramiko.SSHException(
'Connection Failure')
sshpool.item().__exit__ = mock.Mock(return_value=False)
self.assertRaises(exception.VolumeBackendAPIException,
self.client.set_iscsi_chap_authentication,
fake_client.INITIATOR_IQN,
fake_client.USER_NAME,
fake_client.PASSWORD)
def test_get_snapshot_if_snapshot_present_not_busy(self):
expected_vol_name = fake.SNAPSHOT['volume_id']
expected_snapshot_name = fake.SNAPSHOT['name']
response = netapp_api.NaElement(
fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE)
self.mock_send_request.return_value = response
snapshot = self.client.get_snapshot(expected_vol_name,
expected_snapshot_name)
self.assertEqual(expected_vol_name, snapshot['volume'])
self.assertEqual(expected_snapshot_name, snapshot['name'])
self.assertEqual(set([]), snapshot['owners'])
self.assertFalse(snapshot['busy'])
def test_get_snapshot_if_snapshot_present_busy(self):
expected_vol_name = fake.SNAPSHOT['volume_id']
expected_snapshot_name = fake.SNAPSHOT['name']
response = netapp_api.NaElement(
fake_client.SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE)
self.mock_send_request.return_value = response
snapshot = self.client.get_snapshot(expected_vol_name,
expected_snapshot_name)
self.assertEqual(expected_vol_name, snapshot['volume'])
self.assertEqual(expected_snapshot_name, snapshot['name'])
self.assertEqual(set([]), snapshot['owners'])
self.assertTrue(snapshot['busy'])
def test_get_snapshot_if_snapshot_not_present(self):
expected_vol_name = fake.SNAPSHOT['volume_id']
expected_snapshot_name = fake.SNAPSHOT['name']
response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
self.mock_send_request.return_value = response
self.assertRaises(exception.SnapshotNotFound, self.client.get_snapshot,
expected_vol_name, expected_snapshot_name)
| {
"content_hash": "e004c1fb8fd9aff85fa295582ac92403",
"timestamp": "",
"source": "github",
"line_count": 1271,
"max_line_length": 83,
"avg_line_length": 40.83398898505114,
"alnum_prop": 0.5668208092485549,
"repo_name": "HybridF5/jacket",
"id": "88a8b0c0185f0ff86850d2989ee10a4f7ade508f",
"size": "52701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/tests/storage/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._iot_security_solution_analytics_operations import build_get_request, build_list_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IotSecuritySolutionAnalyticsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.security.v2019_08_01.aio.SecurityCenter`'s
:attr:`iot_security_solution_analytics` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def list(
self, resource_group_name: str, solution_name: str, **kwargs: Any
) -> _models.IoTSecuritySolutionAnalyticsModelList:
"""Use this method to get IoT security Analytics metrics in an array.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param solution_name: The name of the IoT Security solution. Required.
:type solution_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IoTSecuritySolutionAnalyticsModelList or the result of cls(response)
:rtype: ~azure.mgmt.security.v2019_08_01.models.IoTSecuritySolutionAnalyticsModelList
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-08-01")) # type: Literal["2019-08-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.IoTSecuritySolutionAnalyticsModelList]
request = build_list_request(
resource_group_name=resource_group_name,
solution_name=solution_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("IoTSecuritySolutionAnalyticsModelList", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels"} # type: ignore
@distributed_trace_async
async def get(
self, resource_group_name: str, solution_name: str, **kwargs: Any
) -> _models.IoTSecuritySolutionAnalyticsModel:
"""Use this method to get IoT Security Analytics metrics.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param solution_name: The name of the IoT Security solution. Required.
:type solution_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IoTSecuritySolutionAnalyticsModel or the result of cls(response)
:rtype: ~azure.mgmt.security.v2019_08_01.models.IoTSecuritySolutionAnalyticsModel
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-08-01")) # type: Literal["2019-08-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.IoTSecuritySolutionAnalyticsModel]
request = build_get_request(
resource_group_name=resource_group_name,
solution_name=solution_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("IoTSecuritySolutionAnalyticsModel", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels/default"} # type: ignore
| {
"content_hash": "c9236f977544887ebf3d569e7f1b08b1",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 201,
"avg_line_length": 44.205882352941174,
"alnum_prop": 0.6754491017964072,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d2a8e768d80ed75e1dace307da988268e3a4406f",
"size": "8015",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/security/azure-mgmt-security/azure/mgmt/security/v2019_08_01/aio/operations/_iot_security_solution_analytics_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
import sys
import codecs
import commands
import shutil
reload(sys)
sys.setdefaultencoding('utf-8')
def GenerateTxt2Pic(files):
"""
将文本文件转换成图片
请注意,使用此功能会将在windows文件格式转换成linux的utf-8,直接覆盖源文件
你可以备份一下
"""
# fileName = os.path.abspath(__file__)
binPath = os.path.dirname(os.path.realpath(__file__))
basePath = os.path.dirname(binPath) + '/'
# print fileName
###以下几行是将windows格式的文件转换成linux可以cat的
allfileName = os.path.abspath(files)
# print allfileName
filename = os.path.basename(allfileName)
# print filename
fllepath = os.path.dirname(allfileName)
# print fllepath
bakfilename = fllepath +'/Bak-' + filename
try:
stat,res = commands.getstatusoutput('file %s' % files)
if stat == 0:
if 'ISO-8859' in res:
#要转换的时候备份一下
shutil.copy(files,bakfilename)
#stat,res = commands.getstatusoutput('enca -L zh_CN -x UTF-8 <%s> %s ' % (files,newfiles))
stat,res = commands.getstatusoutput('enca -L zh_CN -x UTF-8 %s' % files)
if stat == 0:
pass
except:
print "something wrong"
return False
##字体大小
sizeChar = 15
charColor = '#FFFFFF'
#backGround ='#000000' ##333333
backGround ='#333333'
count = len(open(r"%s" % files,'rU').readlines()) * sizeChar + sizeChar * 5
font = PIL.ImageFont.truetype('%s/fonts/simsun.ttc' % basePath , sizeChar)
try:
im = PIL.Image.new('RGB', (1200, count), backGround)
draw = PIL.ImageDraw.Draw(im)
#print(draw.textsize('Hello World!', font)) # (96, 10), 返回字符串将要占用的像素区域大小
except:
print "something wrong"
return False
ii = ''
with open('%s' % files ,"rb") as f:
for i in f.readlines():
ii = ii + u'%s' % i
try:
draw.multiline_text((15, 15), '%s' % ii , charColor, font)
# im.show()
im.save("%s.jpg" % files)
except:
print "something wrong"
return False
if os.path.exists("%s.jpg" % files):
return True
else:
return False
if __name__ == '__main__':
finename = 'report.csv'
if GenerateTxt2Pic(finename):
print 'ok'
else:
print 'no'
| {
"content_hash": "141ff8b601db45e52fdcb611c216fdaa",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 106,
"avg_line_length": 27.53488372093023,
"alnum_prop": 0.5717905405405406,
"repo_name": "lichengshuang/createvhost",
"id": "135df6e760e6477b446c781e60a36eb008f9508b",
"size": "2583",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/makepic/bin/generateTxtFile2Pic.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "84170"
},
{
"name": "C",
"bytes": "25320"
},
{
"name": "CSS",
"bytes": "1323"
},
{
"name": "HTML",
"bytes": "26691"
},
{
"name": "JavaScript",
"bytes": "205981"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "915418"
},
{
"name": "Roff",
"bytes": "6734"
},
{
"name": "Shell",
"bytes": "1548839"
},
{
"name": "Vim script",
"bytes": "56257"
}
],
"symlink_target": ""
} |
from feedbackserver.models import *
from django.contrib import admin
admin.site.register(CI_ResponsibleParty)
admin.site.register(Tags)
admin.site.register(GVQ_UserRoleCode)
admin.site.register(CI_Date)
admin.site.register(CI_Citation)
admin.site.register(CI_Contact)
admin.site.register(MD_Identifier)
admin.site.register(GVQ_UserInformation)
#admin.site.register(GVQ_ScopeCode)
admin.site.register(GVQ_FeedbackTarget)
admin.site.register(GVQ_DataFocus)
#admin.site.register(GVQ_UserComment)
admin.site.register(GVQ_Rating)
admin.site.register(GVQ_FeedbackItem)
admin.site.register(ApplicationDomain)
admin.site.register(DomainURN)
admin.site.register(GVQ_UsageReport)
#admin.site.register(DQ_DataQuality)
admin.site.register(CI_OnlineResource)
admin.site.register(CI_Series)
admin.site.register(GVQ_Publication)
#admin.site.register(GVQ_PublicationPurposeCode)
#admin.site.register(GVQ_PublicationCategoryCode)
#admin.site.register(GVQ_ReportAspectCode)
admin.site.register(GVQ_DiscoveredIssue)
| {
"content_hash": "9f8f41614b490913459a28d9886e4347",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 49,
"avg_line_length": 32.29032258064516,
"alnum_prop": 0.8321678321678322,
"repo_name": "mvdbroek/geo-userfeedback",
"id": "b58f23667048cad6f20de72d4e5e674c8c2b5b41",
"size": "1260",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/feedbackserver/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7351"
},
{
"name": "HTML",
"bytes": "18172"
},
{
"name": "JavaScript",
"bytes": "29806"
},
{
"name": "Python",
"bytes": "142200"
}
],
"symlink_target": ""
} |
from __future__ import annotations
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "distributed-"
cfg.versionfile_source = "distributed/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: dict = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except FileNotFoundError:
continue
except OSError as e:
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs)
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "main".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces[
"error"
] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
| {
"content_hash": "1cf8e986ecabee1d47d862291f2ac315",
"timestamp": "",
"source": "github",
"line_count": 546,
"max_line_length": 88,
"avg_line_length": 32.959706959706956,
"alnum_prop": 0.5722938430762392,
"repo_name": "dask/distributed",
"id": "a37b7c1e8d45f4819fcc603c8c088f84bbfa01c7",
"size": "18344",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "distributed/_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4220"
},
{
"name": "HTML",
"bytes": "16583"
},
{
"name": "JavaScript",
"bytes": "9337"
},
{
"name": "Jinja",
"bytes": "17081"
},
{
"name": "Python",
"bytes": "3746516"
},
{
"name": "Shell",
"bytes": "2030"
}
],
"symlink_target": ""
} |
test.assert_equals(is_opposite('ab', 'AB'), True)
test.assert_equals(is_opposite('aB', 'Ab'), True)
test.assert_equals(is_opposite('aBcd', 'AbCD'), True)
test.assert_equals(is_opposite('AB', 'Ab'), False)
test.assert_equals(is_opposite('', ''), False)
| {
"content_hash": "fa3bee13b69ff43c71321615df60f810",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 53,
"avg_line_length": 50.4,
"alnum_prop": 0.6825396825396826,
"repo_name": "RevansChen/online-judge",
"id": "8c1be713e30444176fa43af43ee61a38f375ba76",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Codewars/8kyu/number-1-are-they-opposite/Python/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Brainfuck",
"bytes": "102"
},
{
"name": "C",
"bytes": "6829"
},
{
"name": "C#",
"bytes": "19758"
},
{
"name": "C++",
"bytes": "9439"
},
{
"name": "Clojure",
"bytes": "75"
},
{
"name": "CoffeeScript",
"bytes": "903"
},
{
"name": "Crystal",
"bytes": "52"
},
{
"name": "Dart",
"bytes": "182"
},
{
"name": "Elixir",
"bytes": "1027"
},
{
"name": "Erlang",
"bytes": "132"
},
{
"name": "F#",
"bytes": "40"
},
{
"name": "Go",
"bytes": "83"
},
{
"name": "Haskell",
"bytes": "102"
},
{
"name": "Java",
"bytes": "11057"
},
{
"name": "JavaScript",
"bytes": "44773"
},
{
"name": "Kotlin",
"bytes": "82"
},
{
"name": "Lua",
"bytes": "93"
},
{
"name": "PHP",
"bytes": "2875"
},
{
"name": "Python",
"bytes": "563400"
},
{
"name": "R",
"bytes": "265"
},
{
"name": "Ruby",
"bytes": "7171"
},
{
"name": "Rust",
"bytes": "74"
},
{
"name": "Scala",
"bytes": "84"
},
{
"name": "Shell",
"bytes": "438"
},
{
"name": "Swift",
"bytes": "6597"
},
{
"name": "TSQL",
"bytes": "3531"
},
{
"name": "TypeScript",
"bytes": "5744"
}
],
"symlink_target": ""
} |
import unittest
import boto3
import moto
@moto.mock_s3()
def setUpModule():
bucket = boto3.resource('s3').create_bucket(Bucket='mybucket')
bucket.wait_until_exists()
@moto.mock_s3()
def tearDownModule():
resource = boto3.resource('s3')
bucket = resource.Bucket('mybucket')
try:
bucket.delete()
except resource.meta.client.exceptions.NoSuchBucket:
pass
bucket.wait_until_not_exists()
@moto.mock_s3()
class Test(unittest.TestCase):
def test(self):
resource = boto3.resource('s3')
bucket = resource.Bucket('mybucket')
self.assertEqual(bucket.name, 'mybucket')
expected = b'hello'
resource.Object('mybucket', 'mykey').put(Body=expected)
actual = resource.Object('mybucket', 'mykey').get()['Body'].read()
self.assertEqual(expected, actual)
def tearDown(self):
boto3.resource('s3').Object('mybucket', 'mykey').delete()
| {
"content_hash": "7c29ea5ee907a3b4038cda7375136674",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 22.975609756097562,
"alnum_prop": 0.6454352441613588,
"repo_name": "mpenkov/smart_open",
"id": "da0c3fef4c663bd53b02dec42c12cb205c567e2c",
"size": "942",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smart_open/tests/test_sanity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "321840"
},
{
"name": "Shell",
"bytes": "4911"
}
],
"symlink_target": ""
} |
import logging
log = logging.getLogger(__name__)
class I2C_CommunicationError(Exception):
error_texts = {
# 0: 'success',
1: 'data too long to fit in transmit buffer',
2: 'received NACK on transmit of address',
3: 'received NACK on transmit of data',
4: 'other error',
}
def __init__(self, error_code):
self.error_code = error_code
Exception.__init__(self, self.error_texts[error_code])
class I2C_Base(object):
''
bus_initialised = False
address = None
def _begin(self):
if not self.bus_initialised:
self.wire.begin(self.address)
self.bus_initialised = True
class I2C_Master(I2C_Base):
"""High level interface for I2C Master."""
def __init__(self, wire):
self.wire = wire
def request(self, address, quantity):
"""Used by the master to request bytes from a slave device.
:param address: the 7-bit address of the device to request bytes from
:param quantity: the number of bytes to request
:param stop: boolean. true will send a stop message after the request, releasing the bus. false will continually send a restart after the request, keeping the connection active.
:returns: list : the list of bytes returned from the slave device
http://arduino.cc/en/Reference/WireRequestFrom
"""
self._begin()
n = self.wire.requestFrom(address, quantity, stop=True)
if n < quantity:
log.info('slave sent less bytes than requested')
ls = n * [None]
for i in range(n):
ls[i] = self.wire.read()
return ls
def send(self, address, data):
""""""
self._begin()
self.wire.beginTransmission(address)
for b in data:
self.wire.write(b)
error_code = self.wire.endTransmission()
if error_code != 0:
raise I2C_CommunicationError(error_code)
def scan(self):
"""The i2c_scanner uses the return value of the Write.endTransmisstion
to see if a device did acknowledge to the address.
original source: http://playground.arduino.cc/Main/I2cScanner#.Uxs4Wdt4iJM
"""
self._begin()
ls = []
for address in range(128):
try:
self.send(address, [])
ls.append(address)
except I2C_CommunicationError:
pass
return ls
class I2C_Slave(I2C_Base):
"""High level interface for I2C Slave."""
def __init__(self, wire, address):
'''
:param address: slave address
'''
self.wire = wire
self.address = address
def receive(self):
self._begin()
n = self.wire.available()
ls = n * [None]
for i in range(n):
ls[i] = self.wire.read()
return ls
def send(self, data):
""""""
self._begin()
for b in data:
self.wire.write(b)
| {
"content_hash": "4f78049554b79fffd38ddb1b72acc031",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 185,
"avg_line_length": 27.37272727272727,
"alnum_prop": 0.5715709066755231,
"repo_name": "nanpy/nanpy",
"id": "dd1eaa8b343cea60fa07654debb02f06b4eb5d35",
"size": "3011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nanpy/i2c.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125648"
}
],
"symlink_target": ""
} |
import argparse
import glob
import os
import os.path as op
import sys
import time
try:
from PyQt4 import QtGui, QtCore
except ImportError:
from PySide import QtGui, QtCore
Qt = QtCore.Qt
import numpy as np
import pyqtgraph
from pyqtgraph import dockarea
import mt
import utils
def respfunc_viewer(path):
app = QtGui.QApplication([])
pyqtgraph.setConfigOption("background", "w")
pyqtgraph.setConfigOption("foreground", "k")
win = QtGui.QMainWindow()
win.setWindowTitle("MT response function data viewer")
darea = dockarea.DockArea()
w = QtGui.QWidget()
win.setCentralWidget(darea)
taglist = QtGui.QListWidget(win)
taglist.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
taglist_dock = dockarea.Dock("Tags")
taglist_dock.addWidget(taglist)
darea.addDock(taglist_dock)
sitelist = QtGui.QListWidget()
sitelist.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
sitelist_dock = dockarea.Dock("Tree...")
sitelist_dock.addWidget(sitelist)
darea.addDock(sitelist_dock, "left", taglist_dock)
resplot = pyqtgraph.PlotWidget()
resplot_dock = dockarea.Dock("APPARENT RESISTIVITY")
resplot_dock.addWidget(resplot)
darea.addDock(resplot_dock, "left", sitelist_dock)
phaseplot = pyqtgraph.PlotWidget()
phaseplot_dock = dockarea.Dock("PHASE")
phaseplot_dock.addWidget(phaseplot)
darea.addDock(phaseplot_dock, "bottom", resplot_dock)
default_pen = [[(255,255,255,90)], dict(width=1)]
select_pen = [["r"], dict(width=1.5)]
skipflag_pen = [[(255,255,255,30)], dict(width=0.5)]
resplotitem = resplot.getPlotItem()
phaseplotitem = phaseplot.getPlotItem()
resplotitem.invertX(True)
phaseplotitem.invertX(True)
resplotitem.setLogMode(x=True, y=True)
phaseplotitem.setLogMode(x=True, y=False)
phaseplotitem.vb.setXLink(resplotitem.vb)
resplotitem.setYRange(np.log10(0.1), np.log10(1000))
phaseplotitem.setYRange(0, 90)
resvb = resplotitem.vb
phasevb = phaseplotitem.vb
data = utils.AttrDict()
tagfns = glob.glob(op.join(path, "*-cal.json"))
tag2fn = {}
fn2tag = {}
sites = set()
tagfns.sort()
data = utils.AttrDict()
with open(op.join(path, "maskedfreqs.json"), mode="r") as f:
maskedfreqs = utils.read_json(f)
maskedlines = utils.AttrDict()
datasymbols = utils.AttrDict()
psymbols = utils.AttrDict({
"xy": dict(pen=None, symbol="o", symbolBrush="b"),
"yx": dict(pen=None, symbol="s", symbolBrush="r")
})
plines = utils.AttrDict({
"xy": dict(pen="b"),
"yx": dict(pen="r")
})
plotpens = utils.AttrDict({"xy": "b", "yx": "r",})
plotsymbols = utils.AttrDict({"xy": "o", "yx": "s"})
def plot(tag):
if not hasattr(datasymbols[tag], "res_xy"):
datasymbols[tag].res_xy = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].res_xy, **psymbols.xy)
datasymbols[tag].res_yx = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].res_yx, **psymbols.yx)
datasymbols[tag].phase_xy = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].phase_xy, **psymbols.xy)
datasymbols[tag].phase_yx = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].phase_yx, **psymbols.yx)
maskedlines[tag].res_xy = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].res_xy, **plines.xy)
maskedlines[tag].res_yx = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].res_yx, **plines.yx)
maskedlines[tag].phase_xy = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].phase_xy, **plines.xy)
maskedlines[tag].phase_yx = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].phase_yx, **plines.yx)
resplotitem.addItem(datasymbols[tag].res_xy)
resplotitem.addItem(datasymbols[tag].res_yx)
resplotitem.addItem(maskedlines[tag].res_xy)
resplotitem.addItem(maskedlines[tag].res_yx)
phaseplotitem.addItem(datasymbols[tag].phase_xy)
phaseplotitem.addItem(datasymbols[tag].phase_yx)
phaseplotitem.addItem(maskedlines[tag].phase_xy)
phaseplotitem.addItem(maskedlines[tag].phase_yx)
for i, freq in enumerate(data[tag].freqs):
if maskedfreqs[tag]["masks"][i] == 0:
data[tag].freqs[i] = float(maskedfreqs[tag]["freqs"][i])
else:
data[tag].freqs[i] = np.nan
maskedlines[tag].res_xy.setData(data[tag].freqs, data[tag].res_xy)
maskedlines[tag].res_yx.setData(data[tag].freqs, data[tag].res_yx)
maskedlines[tag].phase_xy.setData(data[tag].freqs, data[tag].phase_xy)
maskedlines[tag].phase_yx.setData(data[tag].freqs, data[tag].phase_yx)
progress = QtGui.QProgressDialog("Loading data...", "Abort", 0, len(tagfns), win)
progress.setWindowModality(QtCore.Qt.WindowModal)
for i, tagfn in enumerate(tagfns):
progress.setValue(i)
tag = op.basename(tagfn).replace("-cal.json", "")
tag2fn[tag] = tagfn
fn2tag[tagfn] = tag
site = tag.split("-")[0]
sites.add(site)
data[tag] = utils.read_json(tagfn)
if not tag in maskedfreqs:
maskedfreqs[tag] = utils.AttrDict({"freqs": data[tag].freqs.copy(), "masks": np.empty_like(data[tag].freqs) * 0})
if not tag in maskedlines:
maskedlines[tag] = utils.AttrDict()
datasymbols[tag] = utils.AttrDict()
plot(tag)
if progress.wasCanceled():
break
progress.setValue(len(tagfns))
resfreqselect = pyqtgraph.LinearRegionItem([0,-1])
phasefreqselect = pyqtgraph.LinearRegionItem([0,-1])
resplotitem.addItem(resfreqselect)
phaseplotitem.addItem(phasefreqselect)
def res_region_moved():
phasefreqselect.setRegion(resfreqselect.getRegion())
def phase_region_moved():
resfreqselect.setRegion(phasefreqselect.getRegion())
resfreqselect.sigRegionChanged.connect(res_region_moved)
phasefreqselect.sigRegionChanged.connect(phase_region_moved)
def populate_tag_list(filter_sites=None):
if filter_sites:
tags = [t for t in tag2fn.keys() if t.split("-")[0] in filter_sites]
else:
tags = sorted(tag2fn.keys())
tags.sort()
taglist.clear()
for tag in tags:
# print tag
tagitem = QtGui.QListWidgetItem(taglist)
tagitem.setText(tag)
plot_per_tag_list()
print
def plot_per_tag_list():
tags = [t.text() for t in taglist.selectedItems()]
if not tags:
tags = [t.text() for t in [taglist.item(i) for i in xrange(taglist.count())]]
for plotitemtag, tagitems in datasymbols.items():
if plotitemtag in tags:
for item_name, item in tagitems.items():
item.setSymbol(plotsymbols[item_name[-2:]])
# item.setPen(None)#plotpens[item_name[-2:]])
else:
for item in tagitems.values():
item.setSymbol(None)
# item.setPen(None)
for plotitemtag, tagitems in maskedlines.items():
if plotitemtag in tags:
for item_name, item in tagitems.items():
item.setPen(plotpens[item_name[-2:]])
else:
for item in tagitems.values():
item.setPen(None)
def selected_site_names():
return [s.text() for s in sitelist.selectedItems()]
def pick_site():
newsites = selected_site_names()
populate_tag_list(newsites)
# plot_per_tag_list()
def toggle_selected_mask(value):
tags = [str(t.text()) for t in taglist.selectedItems()]
log_mask_range = resfreqselect.getRegion()
fmin = 10 ** log_mask_range[0]
fmax = 10 ** log_mask_range[1]
for tag in tags:
for i, freq in enumerate(maskedfreqs[tag]["freqs"]):
if freq >= fmin and freq <= fmax:
maskedfreqs[tag]["masks"][i] = value
plot(tag)
print log_mask_range, tags, "\n"
disable = QtGui.QPushButton("&Delete selected frequencies")
enable = QtGui.QPushButton("&Enable selected frequencies")
sitelist_dock.addWidget(disable)
sitelist_dock.addWidget(enable)
disable.clicked.connect(lambda: toggle_selected_mask(1))
enable.clicked.connect(lambda: toggle_selected_mask(0))
# def generate_key_press_event_handler(self, vb, event):
# vb.keyPressEvent(self, event)
# if event.key() is Qt.Key_X:
# toggle_selected_mask(mode="xy")
# elif event.key() is Qt.Key_Y:
# toggle_selected_mask(mode="yx")
# resplotitem.vb.keyPressEvent = lambda
populate_tag_list()
sites = sorted(list(sites))
for site in sites:
siteitem = QtGui.QListWidgetItem(sitelist)
siteitem.setText(site)
sitelist.itemSelectionChanged.connect(pick_site)
taglist.itemSelectionChanged.connect(plot_per_tag_list)
def cleanup():
with open(op.join(path, "maskedfreqs.json"), mode="w") as f:
utils.write_json(maskedfreqs, f)
win.showMaximized()
app.aboutToQuit.connect(cleanup)
app.exec_()
def main():
parser = argparse.ArgumentParser("MT response function data viewer")
parser.add_argument("path")
args = parser.parse_args(sys.argv[1:])
return respfunc_viewer(args.path)
if __name__ == "__main__":
main()
| {
"content_hash": "b12716e21b2a604edc036aa608fe511c",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 125,
"avg_line_length": 35.08791208791209,
"alnum_prop": 0.6260569996868149,
"repo_name": "kinverarity1/mtwaffle",
"id": "8fa26d8bb36a293adc8b74ff73f850fdf09304e2",
"size": "9579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mtwaffle/rfviewer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "204318"
},
{
"name": "Python",
"bytes": "73149"
}
],
"symlink_target": ""
} |
import re
__all__ = ["Data"]
class Data:
"""Store data from an individual line received on IRC."""
def __init__(self, my_nick, line, msgtype):
self._my_nick = my_nick.lower()
self._line = line
self._msgtype = msgtype
self._is_private = self._is_command = False
self._msg = self._command = self._trigger = None
self._args = []
self._kwargs = {}
self._parse()
def __repr__(self):
"""Return the canonical string representation of the Data."""
res = "Data(my_nick={0!r}, line={1!r})"
return res.format(self.my_nick, self.line)
def __str__(self):
"""Return a nice string representation of the Data."""
return "<Data of {0!r}>".format(" ".join(self.line))
def _parse(self):
"""Parse a line from IRC into its components as instance attributes."""
self._chan = self.line[2]
try:
sender = re.findall(r":(.*?)!(.*?)@(.*?)\Z", self.line[0])[0]
except IndexError:
self._host = self.line[0][1:]
self._nick = self._ident = self._reply_nick = "*"
return
self._nick, self._ident, self._host = sender
self._reply_nick = self._nick
if self._msgtype in ["PRIVMSG", "NOTICE"]:
if self.chan.lower() == self.my_nick:
# This is a privmsg to us, so set 'chan' as the nick of the
# sender instead of the 'channel', which is ourselves:
self._chan = self._nick
self._is_private = True
self._msg = " ".join(self.line[3:])[1:]
if self._msgtype == "PRIVMSG":
self._parse_args()
self._parse_kwargs()
def _parse_args(self):
"""Parse command arguments from the message.
self.msg is converted into the string self.command and the argument
list self.args if the message starts with a "trigger" ("!", ".", or the
bot's name); self.is_command will be set to True, and self.trigger will
store the trigger string. Otherwise, is_command will be set to False.
"""
self._args = self.msg.strip().split()
try:
command_uc = self.args.pop(0)
self._command = command_uc.lower()
except IndexError:
return
# e.g. "!command>user arg1 arg2"
if ">" in self.command:
command_uc, self._reply_nick = command_uc.split(">", 1)
self._command = command_uc.lower()
if self.command.startswith("!") or self.command.startswith("."):
# e.g. "!command arg1 arg2"
self._is_command = True
self._trigger = self.command[0]
self._command = self.command[1:] # Strip the "!" or "."
elif re.match(r"{0}\W*?$".format(re.escape(self.my_nick)),
self.command, re.U):
# e.g. "EarwigBot, command arg1 arg2"
self._is_command = True
self._trigger = self.my_nick
try:
self._command = self.args.pop(0).lower()
except IndexError:
self._command = ""
else:
try:
if self.msg[-1] == "." and self.msg[-2] != ".":
if self.args:
self.args[-1] = self.args[-1][:-1]
else:
self._command = self.command[:-1]
except IndexError:
pass
# e.g. "!command >user arg1 arg2"
if self.args and self.args[0].startswith(">"):
self._reply_nick = self.args.pop(0)[1:]
def _parse_kwargs(self):
"""Parse keyword arguments embedded in self.args.
Parse a command given as "!command key1=value1 key2=value2..." into a
dict, self.kwargs, like {'key1': 'value2', 'key2': 'value2'...}.
"""
for arg in self.args:
try:
key, value = re.findall(r"^(.*?)\=(.*?)$", arg)[0]
except IndexError:
continue
if key and value:
self.kwargs[key] = value
@property
def my_nick(self):
"""Our nickname, *not* the nickname of the sender."""
return self._my_nick
@property
def line(self):
"""The full message received on IRC, including escape characters."""
return self._line
@property
def chan(self):
"""Channel the message was sent from.
This will be equal to :py:attr:`nick` if the message is a private
message.
"""
return self._chan
@property
def nick(self):
"""Nickname of the sender."""
return self._nick
@property
def ident(self):
"""`Ident <https://en.wikipedia.org/wiki/Ident_protocol>`_ of the sender."""
return self._ident
@property
def host(self):
"""Hostname of the sender."""
return self._host
@property
def reply_nick(self):
"""Nickname of the person to reply to. Sender by default."""
return self._reply_nick
@property
def msg(self):
"""Text of the sent message, if it is a message, else ``None``."""
return self._msg
@property
def is_private(self):
"""``True`` if this message was sent to us *only*, else ``False``."""
return self._is_private
@property
def is_command(self):
"""Boolean telling whether or not this message is a bot command.
A message is considered a command if and only if it begins with the
character ``"!"``, ``"."``, or the bot's name followed by optional
punctuation and a space (so ``EarwigBot: do something``, ``EarwigBot,
do something``, and ``EarwigBot do something`` are all valid).
"""
return self._is_command
@property
def command(self):
"""If the message is a command, this is the name of the command used.
See :py:attr:`is_command <self.is_command>` for when a message is
considered a command. If it's not a command, this will be set to
``None``.
"""
return self._command
@property
def trigger(self):
"""If this message is a command, this is what triggered it.
It can be either "!" (``"!help"``), "." (``".help"``), or the bot's
name (``"EarwigBot: help"``). Otherwise, it will be ``None``."""
return self._trigger
@property
def args(self):
"""List of all arguments given to this command.
For example, the message ``"!command arg1 arg2 arg3=val3"`` will
produce the args ``["arg1", "arg2", "arg3=val3"]``. This is empty if
the message was not a command or if it doesn't have arguments.
"""
return self._args
@property
def kwargs(self):
"""Dictionary of keyword arguments given to this command.
For example, the message ``"!command arg1=val1 arg2=val2"`` will
produce the kwargs ``{"arg1": "val1", "arg2": "val2"}``. This is empty
if the message was not a command or if it doesn't have keyword
arguments.
"""
return self._kwargs
def serialize(self):
"""Serialize this object into a tuple and return it."""
return (self._my_nick, self._line, self._msgtype)
@classmethod
def unserialize(cls, data):
"""Return a new Data object built from a serialized tuple."""
return cls(*data)
| {
"content_hash": "db3b34217973364708d7732284e54512",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 84,
"avg_line_length": 34.036199095022624,
"alnum_prop": 0.5382876894442967,
"repo_name": "earwig/earwigbot",
"id": "a89d9b9e489d0aaf41a92cb11861a7227479bf04",
"size": "8671",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "earwigbot/irc/data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "476431"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from db import engine
Base = declarative_base()
class TiceCache(Base):
__tablename__ = 'tice'
cardnum = Column(Integer, primary_key=True)
text = Column(String(4096), nullable=False)
date = Column(Integer, nullable=False)
if __name__ == '__main__':
Base.metadata.create_all(engine) | {
"content_hash": "f10cc5f30b21c1dde285bfe9530d4328",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 58,
"avg_line_length": 27.733333333333334,
"alnum_prop": 0.7211538461538461,
"repo_name": "HeraldStudio/webservice-py",
"id": "0127ce3d084837625a419c28cb39c9fec626d5c2",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mod/models/tice_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5918"
},
{
"name": "Python",
"bytes": "245406"
}
],
"symlink_target": ""
} |
class Solution:
def numberOfSteps (self, num: int) -> int:
steps = 0;
while num is not 0:
num = num - 1 if num&1 else int(num/2)
steps += 1
return steps
| {
"content_hash": "b02d0fd5e30ce589a4d09d36cb527220",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 50,
"avg_line_length": 25.75,
"alnum_prop": 0.5,
"repo_name": "AHJenin/acm-type-problems",
"id": "3e953dc790552a32f42e8195f54f77963032f4a7",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/AC/Easy/number-of-steps-to-reduce-a-number-to-zero.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "76491"
},
{
"name": "C++",
"bytes": "2244531"
},
{
"name": "Java",
"bytes": "6724"
},
{
"name": "Python",
"bytes": "2227"
}
],
"symlink_target": ""
} |
"""
Tests For Cells Utility methods
"""
import inspect
import random
from nova.cells import utils as cells_utils
from nova import db
from nova import test
class CellsUtilsTestCase(test.TestCase):
"""Test case for Cells utility methods."""
def test_get_instances_to_sync(self):
fake_context = 'fake_context'
call_info = {'get_all': 0, 'shuffle': 0}
def random_shuffle(_list):
call_info['shuffle'] += 1
def instance_get_all_by_filters(context, filters,
sort_key, sort_order):
self.assertEqual(context, fake_context)
self.assertEqual(sort_key, 'deleted')
self.assertEqual(sort_order, 'asc')
call_info['got_filters'] = filters
call_info['get_all'] += 1
return ['fake_instance1', 'fake_instance2', 'fake_instance3']
self.stubs.Set(db, 'instance_get_all_by_filters',
instance_get_all_by_filters)
self.stubs.Set(random, 'shuffle', random_shuffle)
instances = cells_utils.get_instances_to_sync(fake_context)
self.assertTrue(inspect.isgenerator(instances))
self.assertTrue(len([x for x in instances]), 3)
self.assertEqual(call_info['get_all'], 1)
self.assertEqual(call_info['got_filters'], {})
self.assertEqual(call_info['shuffle'], 0)
instances = cells_utils.get_instances_to_sync(fake_context,
shuffle=True)
self.assertTrue(inspect.isgenerator(instances))
self.assertTrue(len([x for x in instances]), 3)
self.assertEqual(call_info['get_all'], 2)
self.assertEqual(call_info['got_filters'], {})
self.assertEqual(call_info['shuffle'], 1)
instances = cells_utils.get_instances_to_sync(fake_context,
updated_since='fake-updated-since')
self.assertTrue(inspect.isgenerator(instances))
self.assertTrue(len([x for x in instances]), 3)
self.assertEqual(call_info['get_all'], 3)
self.assertEqual(call_info['got_filters'],
{'changes-since': 'fake-updated-since'})
self.assertEqual(call_info['shuffle'], 1)
instances = cells_utils.get_instances_to_sync(fake_context,
project_id='fake-project',
updated_since='fake-updated-since', shuffle=True)
self.assertTrue(inspect.isgenerator(instances))
self.assertTrue(len([x for x in instances]), 3)
self.assertEqual(call_info['get_all'], 4)
self.assertEqual(call_info['got_filters'],
{'changes-since': 'fake-updated-since',
'project_id': 'fake-project'})
self.assertEqual(call_info['shuffle'], 2)
def test_split_cell_and_item(self):
path = 'australia', 'queensland', 'gold_coast'
cell = cells_utils._PATH_CELL_SEP.join(path)
item = 'host_5'
together = cells_utils.cell_with_item(cell, item)
self.assertEqual(cells_utils._CELL_ITEM_SEP.join([cell, item]),
together)
# Test normal usage
result_cell, result_item = cells_utils.split_cell_and_item(together)
self.assertEqual(cell, result_cell)
self.assertEqual(item, result_item)
# Test with no cell
cell = None
together = cells_utils.cell_with_item(cell, item)
self.assertEqual(item, together)
result_cell, result_item = cells_utils.split_cell_and_item(together)
self.assertEqual(cell, result_cell)
self.assertEqual(item, result_item)
| {
"content_hash": "04d15b8373e81751e0aadb6587a4cac3",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 76,
"avg_line_length": 40.337078651685395,
"alnum_prop": 0.6094707520891365,
"repo_name": "sridevikoushik31/nova",
"id": "337556282122659f0166cd5cd345b1b240a05fba",
"size": "4226",
"binary": false,
"copies": "3",
"ref": "refs/heads/port_id_in_vif_on_devide",
"path": "nova/tests/cells/test_cells_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9944606"
},
{
"name": "Ruby",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "17522"
}
],
"symlink_target": ""
} |
from google.cloud import datacatalog
from google.datacatalog_connectors.commons import prepare
from google.datacatalog_connectors.looker.prepare import constants
class DataCatalogTagTemplateFactory(prepare.BaseTagTemplateFactory):
__BOOL_TYPE = datacatalog.FieldType.PrimitiveType.BOOL
__DOUBLE_TYPE = datacatalog.FieldType.PrimitiveType.DOUBLE
__STRING_TYPE = datacatalog.FieldType.PrimitiveType.STRING
__TIMESTAMP_TYPE = datacatalog.FieldType.PrimitiveType.TIMESTAMP
def __init__(self, project_id, location_id):
self.__project_id = project_id
self.__location_id = location_id
def make_tag_template_for_dashboard(self):
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_DASHBOARD)
tag_template.display_name = 'Looker Dashboard Metadata'
self._add_primitive_type_field(tag_template, 'id', self.__STRING_TYPE,
'Unique Id')
self._add_primitive_type_field(tag_template, 'description',
self.__STRING_TYPE, 'Description')
self._add_primitive_type_field(tag_template, 'folder_id',
self.__STRING_TYPE, 'Folder Id')
self._add_primitive_type_field(tag_template, 'folder_name',
self.__STRING_TYPE, 'Folder Name')
self._add_primitive_type_field(tag_template, 'folder_entry',
self.__STRING_TYPE,
'Data Catalog Entry for the Folder')
self._add_primitive_type_field(tag_template, 'is_hidden',
self.__BOOL_TYPE, 'Is hidden')
self._add_primitive_type_field(tag_template, 'user_id',
self.__DOUBLE_TYPE, 'Id of User')
self._add_primitive_type_field(tag_template, 'view_count',
self.__DOUBLE_TYPE,
'Number of views in the web UI')
self._add_primitive_type_field(tag_template, 'favorite_count',
self.__DOUBLE_TYPE,
'Number of times favorited')
self._add_primitive_type_field(tag_template, 'last_accessed_at',
self.__TIMESTAMP_TYPE,
'Time it was last accessed')
self._add_primitive_type_field(tag_template, 'last_viewed_at',
self.__TIMESTAMP_TYPE,
'Time last viewed in the web UI')
self._add_primitive_type_field(tag_template, 'is_deleted',
self.__BOOL_TYPE, 'Is soft deleted')
self._add_primitive_type_field(tag_template, 'deleted_at',
self.__TIMESTAMP_TYPE,
'Time it was soft deleted')
self._add_primitive_type_field(tag_template, 'deleter_id',
self.__DOUBLE_TYPE,
'Id of User that soft deleted it')
self._add_primitive_type_field(tag_template, 'instance_url',
self.__STRING_TYPE,
'Looker Instance Url')
return tag_template
def make_tag_template_for_dashboard_element(self):
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_DASHBOARD_ELEMENT)
tag_template.display_name = 'Looker Dashboard Element Metadata'
self._add_primitive_type_field(tag_template, 'id', self.__STRING_TYPE,
'Unique Id')
self._add_primitive_type_field(tag_template, 'type',
self.__STRING_TYPE, 'Type')
self._add_primitive_type_field(tag_template, 'dashboard_id',
self.__STRING_TYPE, 'Id of Dashboard')
self._add_primitive_type_field(tag_template, 'dashboard_title',
self.__STRING_TYPE,
'Title of Dashboard')
self._add_primitive_type_field(tag_template, 'dashboard_entry',
self.__STRING_TYPE,
'Data Catalog Entry for the Dashboard')
self._add_primitive_type_field(tag_template, 'look_id',
self.__DOUBLE_TYPE, 'Id Of Look')
self._add_primitive_type_field(tag_template, 'look_title',
self.__STRING_TYPE, 'Title Of Look')
self._add_primitive_type_field(tag_template, 'look_entry',
self.__STRING_TYPE,
'Data Catalog Entry for the Look')
self._add_primitive_type_field(tag_template, 'lookml_link_id',
self.__STRING_TYPE, 'LookML link ID')
self._add_primitive_type_field(tag_template, 'query_id',
self.__DOUBLE_TYPE, 'Id Of Query')
self._add_primitive_type_field(tag_template, 'query_entry',
self.__STRING_TYPE,
'Data Catalog Entry for the Query')
self._add_primitive_type_field(tag_template, 'instance_url',
self.__STRING_TYPE,
'Looker Instance Url')
return tag_template
def make_tag_template_for_folder(self):
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_FOLDER)
tag_template.display_name = 'Looker Folder Metadata'
self._add_primitive_type_field(tag_template, 'id', self.__STRING_TYPE,
'Unique Id')
self._add_primitive_type_field(tag_template, 'name',
self.__STRING_TYPE, 'Unique Name')
self._add_primitive_type_field(tag_template, 'has_children',
self.__BOOL_TYPE, 'Has children')
self._add_primitive_type_field(tag_template, 'children_count',
self.__DOUBLE_TYPE, 'Children count')
self._add_primitive_type_field(tag_template, 'parent_id',
self.__STRING_TYPE, 'Id of Parent')
self._add_primitive_type_field(
tag_template, 'parent_entry', self.__STRING_TYPE,
'Data Catalog Entry for the parent Folder')
self._add_primitive_type_field(tag_template, 'has_dashboards',
self.__BOOL_TYPE, 'Has dashboards')
self._add_primitive_type_field(tag_template, 'dashboards_count',
self.__DOUBLE_TYPE, 'Dashboards count')
self._add_primitive_type_field(tag_template, 'has_looks',
self.__BOOL_TYPE, 'Has looks')
self._add_primitive_type_field(tag_template, 'looks_count',
self.__DOUBLE_TYPE, 'Looks count')
self._add_primitive_type_field(tag_template, 'instance_url',
self.__STRING_TYPE,
'Looker Instance Url')
return tag_template
def make_tag_template_for_look(self):
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_LOOK)
tag_template.display_name = 'Looker Look Metadata'
self._add_primitive_type_field(tag_template, 'id', self.__DOUBLE_TYPE,
'Unique Id')
self._add_primitive_type_field(tag_template, 'description',
self.__STRING_TYPE, 'Description')
self._add_primitive_type_field(tag_template, 'folder_id',
self.__STRING_TYPE, 'Folder Id')
self._add_primitive_type_field(tag_template, 'folder_name',
self.__STRING_TYPE, 'Folder Name')
self._add_primitive_type_field(tag_template, 'folder_entry',
self.__STRING_TYPE,
'Data Catalog Entry for the Folder')
self._add_primitive_type_field(tag_template, 'is_public',
self.__BOOL_TYPE, 'Is public')
self._add_primitive_type_field(tag_template, 'user_id',
self.__DOUBLE_TYPE, 'Id of User')
self._add_primitive_type_field(tag_template, 'last_updater_id',
self.__DOUBLE_TYPE,
'Id of User that last updated it')
self._add_primitive_type_field(tag_template, 'query_id',
self.__DOUBLE_TYPE, 'Query Id')
self._add_primitive_type_field(tag_template, 'query_entry',
self.__STRING_TYPE,
'Data Catalog Entry for the Query')
self._add_primitive_type_field(tag_template, 'url', self.__STRING_TYPE,
'Url')
self._add_primitive_type_field(tag_template, 'short_url',
self.__STRING_TYPE, 'Short Url')
self._add_primitive_type_field(tag_template, 'public_url',
self.__STRING_TYPE, 'Public Url')
self._add_primitive_type_field(tag_template, 'excel_file_url',
self.__STRING_TYPE, 'Excel File Url')
self._add_primitive_type_field(tag_template,
'google_spreadsheet_formula',
self.__STRING_TYPE,
'Google Spreadsheet Formula')
self._add_primitive_type_field(tag_template, 'view_count',
self.__DOUBLE_TYPE,
'Number of views in the web UI')
self._add_primitive_type_field(tag_template, 'favorite_count',
self.__DOUBLE_TYPE,
'Number of times favorited')
self._add_primitive_type_field(tag_template, 'last_accessed_at',
self.__TIMESTAMP_TYPE,
'Time it was last accessed')
self._add_primitive_type_field(tag_template, 'last_viewed_at',
self.__TIMESTAMP_TYPE,
'Time last viewed in the web UI')
self._add_primitive_type_field(tag_template, 'is_deleted',
self.__BOOL_TYPE, 'Is soft deleted')
self._add_primitive_type_field(tag_template, 'deleted_at',
self.__TIMESTAMP_TYPE,
'Time it was soft deleted')
self._add_primitive_type_field(tag_template, 'deleter_id',
self.__DOUBLE_TYPE,
'Id of User that soft deleted it')
self._add_primitive_type_field(tag_template, 'instance_url',
self.__STRING_TYPE,
'Looker Instance Url')
return tag_template
def make_tag_template_for_query(self):
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_QUERY)
tag_template.display_name = 'Looker Query Metadata'
self._add_primitive_type_field(tag_template, 'id', self.__DOUBLE_TYPE,
'Unique Id')
self._add_primitive_type_field(tag_template, 'fields',
self.__STRING_TYPE, 'Fields')
self._add_primitive_type_field(tag_template, 'pivots',
self.__STRING_TYPE, 'Pivots')
self._add_primitive_type_field(tag_template, 'sorts',
self.__STRING_TYPE,
'Sorting for the results')
self._add_primitive_type_field(tag_template, 'runtime',
self.__DOUBLE_TYPE, 'Runtime')
self._add_primitive_type_field(tag_template, 'client_id',
self.__STRING_TYPE,
'Id for explore URLs')
self._add_primitive_type_field(tag_template, 'query_timezone',
self.__STRING_TYPE, 'Query Timezone')
self._add_primitive_type_field(tag_template, 'lookml_model',
self.__STRING_TYPE, 'LookML Model name')
self._add_primitive_type_field(tag_template, 'explore_name',
self.__STRING_TYPE, 'Explore name')
self._add_primitive_type_field(tag_template, 'sql', self.__STRING_TYPE,
'Generated SQL')
self._add_primitive_type_field(tag_template, 'lookml_project',
self.__STRING_TYPE, 'LookML Project')
self._add_primitive_type_field(tag_template, 'connection',
self.__STRING_TYPE, 'Connection name')
self._add_primitive_type_field(tag_template, 'host',
self.__STRING_TYPE,
'Server hostname or address')
self._add_primitive_type_field(tag_template, 'database',
self.__STRING_TYPE, 'Database name')
self._add_primitive_type_field(tag_template, 'connection_dialect',
self.__STRING_TYPE, 'SQL Dialect name')
self._add_primitive_type_field(tag_template, 'connection_username',
self.__STRING_TYPE,
'Username for server authentication')
self._add_primitive_type_field(tag_template, 'instance_url',
self.__STRING_TYPE,
'Looker Instance Url')
return tag_template
| {
"content_hash": "c548967f6c3615bb9694ae2275050cb5",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 79,
"avg_line_length": 45.334310850439884,
"alnum_prop": 0.49401643055825084,
"repo_name": "GoogleCloudPlatform/datacatalog-connectors-bi",
"id": "93b762c203524d9f34bdc2a7f9aabf6efcb04547",
"size": "16055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-datacatalog-looker-connector/src/google/datacatalog_connectors/looker/prepare/datacatalog_tag_template_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3191"
},
{
"name": "Python",
"bytes": "980579"
},
{
"name": "Shell",
"bytes": "9469"
}
],
"symlink_target": ""
} |
import numpy as np
import cirq
import cirq.contrib.quimb as ccq
def test_tensor_density_matrix_1():
q = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.YPowGate(exponent=0.25).on(q[0]))
rho1 = cirq.final_density_matrix(c, qubit_order=q, dtype=np.complex128)
rho2 = ccq.tensor_density_matrix(c, q)
np.testing.assert_allclose(rho1, rho2, atol=1e-15)
def test_tensor_density_matrix_optional_qubits():
q = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.YPowGate(exponent=0.25).on(q[0]))
rho1 = cirq.final_density_matrix(c, dtype=np.complex128)
rho2 = ccq.tensor_density_matrix(c)
np.testing.assert_allclose(rho1, rho2, atol=1e-15)
def test_tensor_density_matrix_noise_1():
q = cirq.LineQubit.range(2)
c = cirq.Circuit(
cirq.YPowGate(exponent=0.25).on(q[0]),
cirq.amplitude_damp(1e-2).on(q[0]),
cirq.phase_damp(1e-3).on(q[0]),
)
rho1 = cirq.final_density_matrix(c, qubit_order=q, dtype=np.complex128)
rho2 = ccq.tensor_density_matrix(c, q)
np.testing.assert_allclose(rho1, rho2, atol=1e-15)
def test_tensor_density_matrix_2():
q = cirq.LineQubit.range(2)
rs = np.random.RandomState(52)
for _ in range(10):
g = cirq.MatrixGate(cirq.testing.random_unitary(dim=2 ** len(q), random_state=rs))
c = cirq.Circuit(g.on(*q))
rho1 = cirq.final_density_matrix(c, dtype=np.complex128)
rho2 = ccq.tensor_density_matrix(c, q)
np.testing.assert_allclose(rho1, rho2, atol=1e-8)
def test_tensor_density_matrix_3():
qubits = cirq.LineQubit.range(10)
circuit = cirq.testing.random_circuit(qubits=qubits, n_moments=10, op_density=0.8)
rho1 = cirq.final_density_matrix(circuit, dtype=np.complex128)
rho2 = ccq.tensor_density_matrix(circuit, qubits)
np.testing.assert_allclose(rho1, rho2, atol=1e-8)
def test_tensor_density_matrix_4():
qubits = cirq.LineQubit.range(4)
circuit = cirq.testing.random_circuit(qubits=qubits, n_moments=100, op_density=0.8)
cirq.DropEmptyMoments().optimize_circuit(circuit)
noise_model = cirq.ConstantQubitNoiseModel(cirq.DepolarizingChannel(p=1e-3))
circuit = cirq.Circuit(noise_model.noisy_moments(circuit.moments, qubits))
rho1 = cirq.final_density_matrix(circuit, dtype=np.complex128)
rho2 = ccq.tensor_density_matrix(circuit, qubits)
np.testing.assert_allclose(rho1, rho2, atol=1e-8)
| {
"content_hash": "55f3d36253d6b83758abd300721b3b21",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 90,
"avg_line_length": 36.86153846153846,
"alnum_prop": 0.6878130217028381,
"repo_name": "balopat/Cirq",
"id": "2112de986da44a32d5a0f7331a5e4b41b64fbf68",
"size": "2396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cirq-core/cirq/contrib/quimb/density_matrix_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5923"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "Jupyter Notebook",
"bytes": "23905"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "6256825"
},
{
"name": "Shell",
"bytes": "50383"
},
{
"name": "Starlark",
"bytes": "5979"
}
],
"symlink_target": ""
} |
from conan.packager import ConanMultiPackager
import platform
if __name__ == "__main__":
builder = ConanMultiPackager(args="--build missing")
builder.add_common_builds()
builder.run() | {
"content_hash": "5040835edcfdf0cf619b723eeeb91e69",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 24.625,
"alnum_prop": 0.700507614213198,
"repo_name": "sunxfancy/CodeFactory",
"id": "ce24ba3a8615ccf0bffb9776e25ccdfbf174503c",
"size": "197",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "templates/cpplib/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2286"
},
{
"name": "CMake",
"bytes": "6591"
},
{
"name": "CSS",
"bytes": "13352"
},
{
"name": "HTML",
"bytes": "2772"
},
{
"name": "Makefile",
"bytes": "583"
},
{
"name": "Python",
"bytes": "250373"
},
{
"name": "Shell",
"bytes": "1272"
}
],
"symlink_target": ""
} |
import itk
ImageType = itk.Image[itk.UL, 3]
image = ImageType.New()
print("ITK Hello World!")
print(image)
| {
"content_hash": "ea85819166c17b3df8f7fff8951861c1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 32,
"avg_line_length": 15.571428571428571,
"alnum_prop": 0.7064220183486238,
"repo_name": "InsightSoftwareConsortium/ITKExamples",
"id": "ee831f22754029f92d5d65024649867cb81ddea3",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Core/Common/BuildAHelloWorldProgram/Code.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1345317"
},
{
"name": "CMake",
"bytes": "468162"
},
{
"name": "CSS",
"bytes": "2087"
},
{
"name": "HTML",
"bytes": "8446"
},
{
"name": "JavaScript",
"bytes": "4743"
},
{
"name": "Python",
"bytes": "325825"
},
{
"name": "Shell",
"bytes": "37497"
}
],
"symlink_target": ""
} |
from sqlalchemy import MetaData, Table, func, select
from sqlalchemy.sql import and_
from jacket.compute import exception
from jacket.i18n import _
def upgrade(migrate_engine):
meta = MetaData(migrate_engine)
instances = Table('instances', meta, autoload=True)
sysmeta = Table('instance_system_metadata', meta, autoload=True)
count = select([func.count()]).select_from(sysmeta).where(
and_(instances.c.uuid == sysmeta.c.instance_uuid,
sysmeta.c.key == 'instance_type_id',
sysmeta.c.deleted != sysmeta.c.id,
instances.c.deleted != instances.c.id)).execute().scalar()
if count > 0:
msg = _('There are still %(count)i unmigrated flavor records. '
'Migration cannot continue until all instance flavor '
'records have been migrated to the new format. Please run '
'`compute-manage db migrate_flavor_data\' first.') % {
'count': count}
raise exception.ValidationError(detail=msg)
| {
"content_hash": "061e874b1ea8f1baa9ca95741ed0a92d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 75,
"avg_line_length": 44.69565217391305,
"alnum_prop": 0.6400778210116731,
"repo_name": "HybridF5/jacket",
"id": "89c995c16fbcdbc12e2492f3cbe3b0130f5e9f98",
"size": "1601",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jacket/db/sqlalchemy/migrate_repo/versions/076_compute_291_enforce_flavors_migrated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
"""
Memory Backend
--------------
Provides a simple dictionary-based backend.
"""
from dogpile.cache.api import CacheBackend, NO_VALUE
class MemoryBackend(CacheBackend):
"""A backend that uses a plain dictionary.
There is no size management, and values which
are placed into the dictionary will remain
until explicitly removed. Note that
Dogpile's expiration of items is based on
timestamps and does not remove them from
the cache.
E.g.::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.memory'
)
To use a Python dictionary of your choosing,
it can be passed in with the ``cache_dict``
argument::
my_dictionary = {}
region = make_region().configure(
'dogpile.cache.memory',
arguments={
"cache_dict":my_dictionary
}
)
"""
def __init__(self, arguments):
self._cache = arguments.pop("cache_dict", {})
def get(self, key):
return self._cache.get(key, NO_VALUE)
def get_multi(self, keys):
return [
self._cache.get(key, NO_VALUE)
for key in keys
]
def set(self, key, value):
self._cache[key] = value
def set_multi(self, mapping):
for key,value in mapping.items():
self._cache[key] = value
def delete(self, key):
self._cache.pop(key, None)
def delete_multi(self, keys):
for key in keys:
self._cache.pop(key, None)
| {
"content_hash": "195364d33156c563f4b3bb87fbb10238",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 53,
"avg_line_length": 23.029411764705884,
"alnum_prop": 0.5804597701149425,
"repo_name": "dprince/dogpile.cache",
"id": "e606beab519f2e44519756c5dd30f62f2a1d5473",
"size": "1566",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dogpile/cache/backends/memory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8775"
},
{
"name": "Shell",
"bytes": "3077"
}
],
"symlink_target": ""
} |
from Stack import Stack
def divideBy2(decNumber):
remstack = Stack()
while decNumber > 0:
rem = decNumber % 2
remstack.push(rem)
decNumber = decNumber // 2
binString = ""
while not remstack.empty():
binString = binString + str(remstack.pop())
return binString
if __name__ == '__main__':
print(divideBy2(42)) | {
"content_hash": "493997222dc4d8635425be2a874c7208",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 51,
"avg_line_length": 21.58823529411765,
"alnum_prop": 0.5967302452316077,
"repo_name": "NiuXWolf/Introduction-to-Algorithms",
"id": "c6300451ed7ebd3a2139041dfbe0267949ef926a",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "00/dec2bin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20023"
}
],
"symlink_target": ""
} |
'''
Basic tests for instantiating the main object
'''
import pytest
from tourbillon import Tourbillon
def test_tourbillon():
tb = Tourbillon()
| {
"content_hash": "b440cb600380a4b8c473a1bff2b6f006",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 14.7,
"alnum_prop": 0.7482993197278912,
"repo_name": "adamgilman/tourbillon",
"id": "a79524aaa41b1a67c8a531ca9151e07b4c88ce3c",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tourbillon_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9188"
}
],
"symlink_target": ""
} |
"""
Convert json to tsv from stdin by extracting the specified objects.
e.g., cat my.json | json2tsv id user.name
"""
import argparse
import json
import sys
from . import util
def main():
parser = argparse.ArgumentParser(
description='Convert json to tsv from stdin by extracting the ' +
'specified objects.')
parser.add_argument('fields', nargs='+', help='fields to print')
parser.add_argument(
'--headers', action="store_true", help='print headers as first row')
args = parser.parse_args()
input = util.read(sys.stdin)
output = util.write(sys.stdout)
run(input, output, args.fields, args.headers)
def run(input, output, fields, headers):
# Write headers
if headers:
output.write('\t'.join(util.encode(f) for f in fields))
output.write('\n')
# Write lines from input
for i, line in enumerate(input):
lineno = i + 1 # i starts at 0, so add 1
# Try to read some JSON
try:
obj_or_list = json.loads(line)
except Exception as e:
sys.stderr.write('line %s is not valid JSON: %s\n' % (lineno, e))
continue
if isinstance(obj_or_list, list):
json_list = obj_or_list
for obj in json_list:
output.write('\t'.join(extract_row(fields, obj)))
output.write('\n')
elif isinstance(obj_or_list, dict):
obj = obj_or_list
output.write('\t'.join(extract_row(fields, obj)))
output.write('\n')
else:
sys.stderr.write('line %s is not a JSON list or object: %r\n' %
(lineno, line))
def extract_row(fields, obj):
return (util.encode(extract_value(field, obj)) for field in fields)
def extract_value(field, obj):
"""
>>> js = {'id': 123, 'user': {'name': 'mary'}}
>>> extract_value('id', js)
123
>>> extract_value('user', js)
{'name': 'mary'}
>>> extract_value('user.name', js)
'mary'
>>> extract_value('-', js)
{'id': 123, 'user': {'name': 'mary'}}
>>> extract_value('not_valid', js)
"""
if field == "-": # Special case -- return whole json object
return obj
parts = field.split('.')
for p in parts:
if isinstance(obj, dict) and p in obj:
obj = obj[p]
else:
return None
return obj
| {
"content_hash": "c216edb15daa26f8d58917e1026112b7",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 77,
"avg_line_length": 28.69047619047619,
"alnum_prop": 0.5568464730290457,
"repo_name": "tapilab/json2tsv",
"id": "2a0fc4070215c87dc1069863adda838a5c656dbb",
"size": "2432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "json2tsv/json2tsv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1273"
},
{
"name": "Python",
"bytes": "9571"
}
],
"symlink_target": ""
} |
import neuroml_via_xsl_core
import neuroml_via_xsl_neuron
| {
"content_hash": "c5abfa7c2dea8997d47d721e34147c77",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 29,
"avg_line_length": 29,
"alnum_prop": 0.8275862068965517,
"repo_name": "mikehulluk/morphforge",
"id": "920d9065a28f314980e02381fc70df7390308a62",
"size": "1597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morphforgecontrib/simulation/channels/neuroml_via_xsl/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "AMPL",
"bytes": "4818"
},
{
"name": "C",
"bytes": "1499"
},
{
"name": "Makefile",
"bytes": "4436"
},
{
"name": "Python",
"bytes": "1557833"
},
{
"name": "Shell",
"bytes": "14"
},
{
"name": "XSLT",
"bytes": "94266"
}
],
"symlink_target": ""
} |
"""Support for Ring Doorbell/Chimes."""
from __future__ import annotations
import asyncio
from datetime import timedelta
from functools import partial
import logging
from pathlib import Path
from oauthlib.oauth2 import AccessDeniedError
import requests
from ring_doorbell import Auth, Ring
from homeassistant.const import __version__
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.async_ import run_callback_threadsafe
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Ring.com"
NOTIFICATION_ID = "ring_notification"
NOTIFICATION_TITLE = "Ring Setup"
DOMAIN = "ring"
DEFAULT_ENTITY_NAMESPACE = "ring"
PLATFORMS = ("binary_sensor", "light", "sensor", "switch", "camera")
async def async_setup(hass, config):
"""Set up the Ring component."""
if DOMAIN not in config:
return True
def legacy_cleanup():
"""Clean up old tokens."""
old_cache = Path(hass.config.path(".ring_cache.pickle"))
if old_cache.is_file():
old_cache.unlink()
await hass.async_add_executor_job(legacy_cleanup)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
def token_updater(token):
"""Handle from sync context when token is updated."""
run_callback_threadsafe(
hass.loop,
partial(
hass.config_entries.async_update_entry,
entry,
data={**entry.data, "token": token},
),
).result()
auth = Auth(f"HomeAssistant/{__version__}", entry.data["token"], token_updater)
ring = Ring(auth)
try:
await hass.async_add_executor_job(ring.update_data)
except AccessDeniedError:
_LOGGER.error("Access token is no longer valid. Please set up Ring again")
return False
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
"api": ring,
"devices": ring.devices(),
"device_data": GlobalDataUpdater(
hass, "device", entry.entry_id, ring, "update_devices", timedelta(minutes=1)
),
"dings_data": GlobalDataUpdater(
hass,
"active dings",
entry.entry_id,
ring,
"update_dings",
timedelta(seconds=5),
),
"history_data": DeviceDataUpdater(
hass,
"history",
entry.entry_id,
ring,
lambda device: device.history(limit=10),
timedelta(minutes=1),
),
"health_data": DeviceDataUpdater(
hass,
"health",
entry.entry_id,
ring,
lambda device: device.update_health_data(),
timedelta(minutes=1),
),
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
if hass.services.has_service(DOMAIN, "update"):
return True
async def async_refresh_all(_):
"""Refresh all ring data."""
for info in hass.data[DOMAIN].values():
await info["device_data"].async_refresh_all()
await info["dings_data"].async_refresh_all()
await hass.async_add_executor_job(info["history_data"].refresh_all)
await hass.async_add_executor_job(info["health_data"].refresh_all)
# register service
hass.services.async_register(DOMAIN, "update", async_refresh_all)
return True
async def async_unload_entry(hass, entry):
"""Unload Ring entry."""
if not await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
return False
hass.data[DOMAIN].pop(entry.entry_id)
if len(hass.data[DOMAIN]) != 0:
return True
# Last entry unloaded, clean up service
hass.services.async_remove(DOMAIN, "update")
return True
class GlobalDataUpdater:
"""Data storage for single API endpoint."""
def __init__(
self,
hass: HomeAssistant,
data_type: str,
config_entry_id: str,
ring: Ring,
update_method: str,
update_interval: timedelta,
) -> None:
"""Initialize global data updater."""
self.hass = hass
self.data_type = data_type
self.config_entry_id = config_entry_id
self.ring = ring
self.update_method = update_method
self.update_interval = update_interval
self.listeners = []
self._unsub_interval = None
@callback
def async_add_listener(self, update_callback):
"""Listen for data updates."""
# This is the first listener, set up interval.
if not self.listeners:
self._unsub_interval = async_track_time_interval(
self.hass, self.async_refresh_all, self.update_interval
)
self.listeners.append(update_callback)
@callback
def async_remove_listener(self, update_callback):
"""Remove data update."""
self.listeners.remove(update_callback)
if not self.listeners:
self._unsub_interval()
self._unsub_interval = None
async def async_refresh_all(self, _now: int | None = None) -> None:
"""Time to update."""
if not self.listeners:
return
try:
await self.hass.async_add_executor_job(
getattr(self.ring, self.update_method)
)
except AccessDeniedError:
_LOGGER.error("Ring access token is no longer valid. Set up Ring again")
await self.hass.config_entries.async_unload(self.config_entry_id)
return
except requests.Timeout:
_LOGGER.warning(
"Time out fetching Ring %s data",
self.data_type,
)
return
except requests.RequestException as err:
_LOGGER.warning(
"Error fetching Ring %s data: %s",
self.data_type,
err,
)
return
for update_callback in self.listeners:
update_callback()
class DeviceDataUpdater:
"""Data storage for device data."""
def __init__(
self,
hass: HomeAssistant,
data_type: str,
config_entry_id: str,
ring: Ring,
update_method: str,
update_interval: timedelta,
) -> None:
"""Initialize device data updater."""
self.data_type = data_type
self.hass = hass
self.config_entry_id = config_entry_id
self.ring = ring
self.update_method = update_method
self.update_interval = update_interval
self.devices = {}
self._unsub_interval = None
async def async_track_device(self, device, update_callback):
"""Track a device."""
if not self.devices:
self._unsub_interval = async_track_time_interval(
self.hass, self.refresh_all, self.update_interval
)
if device.device_id not in self.devices:
self.devices[device.device_id] = {
"device": device,
"update_callbacks": [update_callback],
"data": None,
}
# Store task so that other concurrent requests can wait for us to finish and
# data be available.
self.devices[device.device_id]["task"] = asyncio.current_task()
self.devices[device.device_id][
"data"
] = await self.hass.async_add_executor_job(self.update_method, device)
self.devices[device.device_id].pop("task")
else:
self.devices[device.device_id]["update_callbacks"].append(update_callback)
# If someone is currently fetching data as part of the initialization, wait for them
if "task" in self.devices[device.device_id]:
await self.devices[device.device_id]["task"]
update_callback(self.devices[device.device_id]["data"])
@callback
def async_untrack_device(self, device, update_callback):
"""Untrack a device."""
self.devices[device.device_id]["update_callbacks"].remove(update_callback)
if not self.devices[device.device_id]["update_callbacks"]:
self.devices.pop(device.device_id)
if not self.devices:
self._unsub_interval()
self._unsub_interval = None
def refresh_all(self, _=None):
"""Refresh all registered devices."""
for device_id, info in self.devices.items():
try:
data = info["data"] = self.update_method(info["device"])
except AccessDeniedError:
_LOGGER.error("Ring access token is no longer valid. Set up Ring again")
self.hass.add_job(
self.hass.config_entries.async_unload(self.config_entry_id)
)
return
except requests.Timeout:
_LOGGER.warning(
"Time out fetching Ring %s data for device %s",
self.data_type,
device_id,
)
continue
except requests.RequestException as err:
_LOGGER.warning(
"Error fetching Ring %s data for device %s: %s",
self.data_type,
device_id,
err,
)
continue
for update_callback in info["update_callbacks"]:
self.hass.loop.call_soon_threadsafe(update_callback, data)
| {
"content_hash": "9c433de6a41600cd05be76b6a77c23d1",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 96,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.5758681822922098,
"repo_name": "Danielhiversen/home-assistant",
"id": "a8196b30302f6ebc48587ac8a8e55828ac8e01b8",
"size": "9589",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ring/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""
Norwegian-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
import datetime
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from .no_municipalities import MUNICIPALITY_CHOICES
class NOZipCodeField(RegexField):
"""
A form field that validates input as a Norwegian zip code. Valid codes
have four digits.
"""
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(NOZipCodeField, self).__init__(r'^\d{4}$',
max_length, min_length, *args, **kwargs)
class NOMunicipalitySelect(Select):
"""
A Select widget that uses a list of Norwegian municipalities (fylker)
as its choices.
"""
def __init__(self, attrs=None):
super(NOMunicipalitySelect, self).__init__(attrs, choices=MUNICIPALITY_CHOICES)
class NOSocialSecurityNumber(Field):
"""
Algorithm is documented at http://no.wikipedia.org/wiki/Personnummer
"""
default_error_messages = {
'invalid': _('Enter a valid Norwegian social security number.'),
}
def clean(self, value):
super(NOSocialSecurityNumber, self).clean(value)
if value in EMPTY_VALUES:
return ''
if not re.match(r'^\d{11}$', value):
raise ValidationError(self.error_messages['invalid'])
day = int(value[:2])
month = int(value[2:4])
year2 = int(value[4:6])
inum = int(value[6:9])
self.birthday = None
try:
if 000 <= inum < 500:
self.birthday = datetime.date(1900 + year2, month, day)
if 500 <= inum < 750 and year2 > 54:
self.birthday = datetime.date(1800 + year2, month, day)
if 500 <= inum < 1000 and year2 < 40:
self.birthday = datetime.date(2000 + year2, month, day)
if 900 <= inum < 1000 and year2 > 39:
self.birthday = datetime.date(1900 + year2, month, day)
except ValueError:
raise ValidationError(self.error_messages['invalid'])
sexnum = int(value[8])
if sexnum % 2 == 0:
self.gender = 'F'
else:
self.gender = 'M'
digits = map(int, list(value))
weight_1 = [3, 7, 6, 1, 8, 9, 4, 5, 2, 1, 0]
weight_2 = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2, 1]
def multiply_reduce(aval, bval):
return sum([(a * b) for (a, b) in zip(aval, bval)])
if multiply_reduce(digits, weight_1) % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
if multiply_reduce(digits, weight_2) % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
return value
class NOPhoneNumberField(RegexField):
"""
Field with phonenumber validation. Requires a phone number with
8 digits and optional country code
"""
default_error_messages = {
'invalid': _('A phone number must be 8 digits and may have country code'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(NOPhoneNumberField, self).__init__(r'^(?:\+47)? ?(\d{3}\s?\d{2}\s?\d{3}|\d{2}\s?\d{2}\s?\d{2}\s?\d{2})$',
max_length, min_length, *args, **kwargs)
| {
"content_hash": "76cd2d5d835564dfdc0ddac76c678dd1",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 119,
"avg_line_length": 33.33644859813084,
"alnum_prop": 0.5834034202410989,
"repo_name": "zarelit/django-localflavor",
"id": "9b4dc8e9d37d21be793fc1dc2637b3039cf87759",
"size": "3567",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "localflavor/no/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "766801"
}
],
"symlink_target": ""
} |
import os
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
BASE_DIR = PACKAGE_ROOT
DEBUG = True
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "dev.db",
}
}
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "UTC"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = int(os.environ.get("SITE_ID", 1))
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory static files should be collected to.
# Don"t put anything in this directory yourself; store your static files
# in apps" "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "static")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/site_media/static/"
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, "static", "dist"),
]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = ")x8&88d=og_shjv*z+k%slq6+6ll^&xka56she93c6_mxt#tp6"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PACKAGE_ROOT, "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"debug": DEBUG,
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
"account.context_processors.account",
"pinax_theme_bootstrap.context_processors.theme",
],
},
},
]
MIDDLEWARE_CLASSES = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.auth.middleware.SessionAuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "dappjango.urls"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "dappjango.wsgi.application"
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
# theme
"bootstrapform",
"pinax_theme_bootstrap",
# external
"account",
"pinax.eventlog",
"pinax.webanalytics",
# project
"dappjango",
]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler"
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
}
}
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_EMAIL_UNIQUE = True
ACCOUNT_EMAIL_CONFIRMATION_REQUIRED = False
ACCOUNT_LOGIN_REDIRECT_URL = "home"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2
ACCOUNT_USE_AUTH_AUTHENTICATE = True
AUTHENTICATION_BACKENDS = [
"account.auth_backends.UsernameAuthenticationBackend",
]
| {
"content_hash": "b76df16e07964b0d072080942cdb7c4b",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 82,
"avg_line_length": 31.489247311827956,
"alnum_prop": 0.6779921461499061,
"repo_name": "GuildLeader/interfaces",
"id": "d950ca88d4873911d193ea2e31432572e9e01bf7",
"size": "5857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dappjango/dappjango/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1067"
},
{
"name": "HTML",
"bytes": "4945"
},
{
"name": "JavaScript",
"bytes": "9415"
},
{
"name": "Python",
"bytes": "8883"
}
],
"symlink_target": ""
} |
"""
Test the regressor influence visualizers.
"""
##########################################################################
## Imports
##########################################################################
import pytest
import numpy as np
import matplotlib.pyplot as plt
from tests.base import VisualTestCase
from tests.fixtures import TestDataset
from sklearn.datasets import make_regression
from yellowbrick.regressor.influence import *
from yellowbrick.datasets import load_concrete
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Fixtures
##########################################################################
@pytest.fixture(scope='class')
def data(request):
"""
Creates a random regression fixture that has a R2 score below 0.85 and several
outliers that best demonstrate the effectiveness of influence visualizers.
"""
X, y = make_regression(
n_samples=100,
n_features=14,
n_informative=6,
bias=1.2,
noise=49.8,
tail_strength=0.6,
random_state=637,
)
request.cls.data = TestDataset(X, y)
##########################################################################
## Assertion Helpers
##########################################################################
LEARNED_FIELDS = (
'distance_', 'p_values_', 'influence_threshold_', 'outlier_percentage_'
)
def assert_not_fitted(oz):
for field in LEARNED_FIELDS:
assert not hasattr(oz, field)
def assert_fitted(oz):
for field in LEARNED_FIELDS:
assert hasattr(oz, field)
##########################################################################
## Test CooksDistance Visualizer
##########################################################################
@pytest.mark.usefixtures("data")
class TestCooksDistance(VisualTestCase):
"""
CooksDistance visual test cases
"""
def test_cooks_distance(self):
"""
Test image similarity of Cook's Distance on a random dataset
"""
_, ax = plt.subplots()
viz = CooksDistance(ax=ax)
assert_not_fitted(viz)
assert viz.fit(self.data.X, self.data.y) is viz
assert_fitted(viz)
# Test fitted values
assert viz.distance_.shape == (self.data.X.shape[0],)
assert viz.p_values_.shape == viz.distance_.shape
assert 0.0 <= viz.influence_threshold_ <= 4.0
assert 0.0 <= viz.outlier_percentage_ <= 100.0
self.assert_images_similar(viz)
def test_cooks_distance_quickmethod(self):
"""
Test the cooks_distance quick method on a random dataset
"""
_, ax = plt.subplots()
viz = cooks_distance(
self.data.X,
self.data.y,
ax=ax,
draw_threshold=False,
linefmt="r-",
markerfmt="ro",
)
assert_fitted(viz)
self.assert_images_similar(viz)
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_integration(self):
"""
Test on the concrete dataset with pandas DataFrame and Series
"""
data = load_concrete(return_dataset=True)
X, y = data.to_pandas()
assert isinstance(X, pd.DataFrame)
assert isinstance(y, pd.Series)
_, ax = plt.subplots()
viz = CooksDistance(ax=ax).fit(X, y)
assert_fitted(viz)
assert viz.distance_.sum() == pytest.approx(1.2911900571300652)
assert viz.p_values_.sum() == pytest.approx(1029.9999525376425)
assert viz.influence_threshold_ == pytest.approx(0.003883495145631068)
assert viz.outlier_percentage_ == pytest.approx(7.3786407766990285)
viz.finalize()
self.assert_images_similar(viz)
def test_numpy_integration(self):
"""
Test on concrete dataset with numpy arrays
"""
data = load_concrete(return_dataset=True)
X, y = data.to_numpy()
assert isinstance(X, np.ndarray)
assert isinstance(y, np.ndarray)
_, ax = plt.subplots()
viz = CooksDistance(ax=ax).fit(X, y)
assert_fitted(viz)
assert viz.distance_.sum() == pytest.approx(1.2911900571300652)
assert viz.p_values_.sum() == pytest.approx(1029.9999525376425)
assert viz.influence_threshold_ == pytest.approx(0.003883495145631068)
assert viz.outlier_percentage_ == pytest.approx(7.3786407766990285)
viz.finalize()
self.assert_images_similar(viz) | {
"content_hash": "1e2f6c078d5819a9dd9e4b200500e710",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 82,
"avg_line_length": 29.044303797468356,
"alnum_prop": 0.5434735236434953,
"repo_name": "pdamodaran/yellowbrick",
"id": "339179f32c16085df780de7e684b623fec848f33",
"size": "4915",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_regressor/test_influence.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1082"
},
{
"name": "Python",
"bytes": "1218356"
},
{
"name": "TeX",
"bytes": "3743"
}
],
"symlink_target": ""
} |
"""
imshows.py: wrappers around matplotlib.pyplot.imshow with saner
defaults for 3D scientific images.
"""
import numpy as np
from matplotlib import pyplot as plt, cm, colors
from skimage import color
def cshow(im):
"""Show an image (or cross-section) with the cubehelix colormap.
Parameters
----------
im : array
An array of intensity values (ie not multichannel).
Returns
-------
ax : matplotlib AxesImage object
The figure axes.
Notes
-----
For nD images with n > 2, ``cshow`` repeatedly takes the middle
cross- section of leading axes until a 2D image remains. For
example, given an array `im` of shape ``(7, 512, 512)``, ``cshow``
will display `im[3]`. For shape ``(4, 50, 50, 50)``, ``cshow`` will
display `im[2, 25]`.
"""
if im.ndim > 2:
mid = im.shape[0] // 2
ax = cshow(im[mid])
else:
ax = plt.imshow(im, cmap=cm.cubehelix, interpolation='nearest')
return ax
def _factors(n):
"""Return integer factors of `n`, not including 1 or `n`.
Parameters
----------
n : int
Integer for which we want a factorization.
Returns
-------
fs : list of int
The list of factors of `n` (empty if `n` is prime).
Examples
--------
>>> _factors(10)
[2, 5]
>>> _factors(20)
[2, 4, 5, 10]
"""
fs = filter(lambda i: (n % i == 0), range(2, 1 + n/2))
return fs
def rshow(values):
"""Show a 1D vector of values in a rectangular grid.
Parameters
----------
values : 1D array
The values to be plotted.
Returns
-------
ax : matplotlib AxesImage object
The figure axes.
Notes
-----
If the number of values is prime, rshow will revert to a line plot.
"""
n = len(values)
fs = _factors(n)
k = len(fs)
if k == 0:
return plt.plot(values)
else:
new_shape = (-1, fs[k // 2])
values_im = values.reshape(new_shape)
return cshow(values_im)
def nshow(im):
"""Show an image after normalising each channel to [0, 255] uint8.
Parameters
----------
im : array
The input image.
Returns
-------
ax : matplotlib AxesImage object
The figure axes.
"""
channel_mins = im.min(axis=0).min(axis=0)[np.newaxis, np.newaxis, :]
channel_maxs = im.max(axis=0).max(axis=0)[np.newaxis, np.newaxis, :]
im_out = (im.astype(float) - channel_mins) / (channel_maxs - channel_mins)
ax = plt.imshow(im_out)
return ax
def sshow(im, labrandom=True):
"""Show a segmentation (or cross-section) using a random colormap.
Parameters
----------
im : np.ndarray of int
The segmentation to be displayed.
labrandom : bool, optional
Use random points in the Lab colorspace instead of RGB.
Returns
-------
ax : matplotlib AxesImage object
The figure axes.
"""
if im.ndim > 2:
mid = im.shape[0] // 2
ax = sshow(im[mid], labrandom)
else:
rand_colors = np.random.rand(np.ceil(im.max()), 3)
if labrandom:
rand_colors[:, 0] = rand_colors[:, 0] * 60 + 20
rand_colors[:, 1] = rand_colors[:, 1] * 185 - 85
rand_colors[:, 2] = rand_colors[:, 2] * 198 - 106
rand_colors = color.lab2rgb(rand_colors[np.newaxis, ...])[0]
rand_colors[rand_colors < 0] = 0
rand_colors[rand_colors > 1] = 1
rcmap = colors.ListedColormap(np.concatenate((np.zeros((1, 3)),
rand_colors)))
ax = plt.imshow(im, cmap=rcmap, interpolation='nearest')
return ax
| {
"content_hash": "acec767a11671053f1e301af57757813",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 78,
"avg_line_length": 25.985915492957748,
"alnum_prop": 0.5552845528455285,
"repo_name": "jni/vis",
"id": "e9a861f85efa74b4837ccd082f39501cecc186db",
"size": "3690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vis/imshows.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3787"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('total', models.DecimalField(default=0, max_digits=50, decimal_places=2)),
('active', models.BooleanField(default=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('cart', models.ForeignKey(to='cart.Cart')),
('product', models.ForeignKey(to='products.Product')),
],
),
]
| {
"content_hash": "bcc6a88b5b684aa1a20ba10c2c041fab",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 114,
"avg_line_length": 39.361111111111114,
"alnum_prop": 0.5758645024700071,
"repo_name": "codingforentrepreneurs/marketplace",
"id": "bc65bd7103d1a1b836b0c64473e5910bbf37b30d",
"size": "1441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cart/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42439"
},
{
"name": "HTML",
"bytes": "13438"
},
{
"name": "JavaScript",
"bytes": "108201"
},
{
"name": "Python",
"bytes": "38107"
}
],
"symlink_target": ""
} |
"""
============================
Experiment with eye-tracking
============================
Integration with Eyelink functionality makes programming experiments
using eye-tracking simpler.
"""
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from expyfun import ExperimentController, EyelinkController, visual
import expyfun.analyze as ea
print(__doc__)
with ExperimentController('testExp', full_screen=True, participant='foo',
session='001', output_dir=None, version='dev') as ec:
el = EyelinkController(ec)
ec.screen_prompt('Welcome to the experiment!\n\nFirst, we will '
'perform a screen calibration.\n\nPress a button '
'to continue.')
el.calibrate() # by default this starts recording EyeLink data
ec.screen_prompt('Excellent! Now, follow the red circle around the edge '
'of the big white circle.\n\nPress a button to '
'continue')
# make some circles to be drawn
radius = 7.5 # degrees
targ_rad = 0.2 # degrees
theta = np.linspace(np.pi / 2., 2.5 * np.pi, 200)
x_pos, y_pos = radius * np.cos(theta), radius * np.sin(theta)
big_circ = visual.Circle(ec, radius, (0, 0), units='deg',
fill_color=None, line_color='white',
line_width=3.0)
targ_circ = visual.Circle(ec, targ_rad, (x_pos[0], y_pos[0]),
units='deg', fill_color='red')
fix_pos = (x_pos[0], y_pos[0])
# start out by waiting for a 1 sec fixation at the start
big_circ.draw()
targ_circ.draw()
screenshot = ec.screenshot()
ec.identify_trial(ec_id='Circle', ttl_id=[0], el_id=[0])
ec.start_stimulus() # automatically stamps to EL
if not el.wait_for_fix(fix_pos, 1., max_wait=5., units='deg'):
print('Initial fixation failed')
for ii, (x, y) in enumerate(zip(x_pos[1:], y_pos[1:])):
targ_circ.set_pos((x, y), units='deg')
big_circ.draw()
targ_circ.draw()
ec.flip()
if not el.wait_for_fix([x, y], max_wait=5., units='deg'):
print('Fixation {0} failed'.format(ii + 1))
ec.trial_ok()
el.stop() # stop recording to save the file
ec.screen_prompt('All done!', max_wait=1.0)
# eyelink auto-closes (el.close()) because it gets registered with EC
ea.plot_screen(screenshot)
| {
"content_hash": "ce09ea2df2c8b4ea29750a0c8c583e19",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 79,
"avg_line_length": 37.984375,
"alnum_prop": 0.5857671740024681,
"repo_name": "Eric89GXL/expyfun",
"id": "e1270600fa48da65d8ef6b2666a299e00fee62ba",
"size": "2431",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/experiments/eyetracking_experiment_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1006"
},
{
"name": "Python",
"bytes": "451111"
}
],
"symlink_target": ""
} |
from netCDF4 import Dataset
import json
# In[ ]:
# Assume that /projects/CHARIS is sshfs mounted on this machine, and
# that the user has write permission
fid = Dataset('~/projects/CHARIS/snow_cover/modice.v0.4/min05yr_nc/MODICE.v0.4.1test.nc', 'w', format='NETCDF4')
fid.Conventions = "CF-1.6"
fid = Dataset('/home/vagrant/measures-byu/src/prod/cetb_file/templates/cetb_global_template.nc', 'w', format='NETCDF4')
fid.Conventions = "CF-1.6"
fid.title = "MODICE mask for a minimum number of years"
fid.product_version = "v0.4"
#fid.software_version_id = "TBD"
#fid.software_repository = "[email protected]:nsidc/measures-byu.git"
fid.source = "MODICE"
fid.source_version_id = "v04"
fid.history = ""
fid.comment = "Mask locations with 2 indicate MODICE for >= min_years."
fid.references = "Painter, T. H., Brodzik, M. J., A. Racoviteanu, R. Armstrong. 2012. Automated mapping of Earth's annual minimum exposed snow and ice with MODIS. Geophysical Research Letters, 39(20):L20501, doi:10.1029/2012GL053340."
fid.summary = ["An improved, enhanced-resolution, gridded passive microwave Earth System Data Record \n",
"for monitoring cryospheric and hydrologic time series\n" ]fid.title = "MEaSUREs Calibrated Passive Microwave Daily EASE-Grid 2.0 Brightness Temperature ESDR"
fid.institution = ["National Snow and Ice Data Center\n",
"Cooperative Institute for Research in Environmental Sciences\n",
"University of Colorado at Boulder\n",
"Boulder, CO"]
fid.publisher = ["National Snow and Ice Data Center\n",
"Cooperative Institute for Research in Environmental Sciences\n",
"University of Colorado at Boulder\n",
"Boulder, CO"]
fid.publisher_url = "http://nsidc.org/charis"
fid.publisher_email = "[email protected]"
fid.project = "CHARIS"
fid.standard_name_vocabulary = "CF Standard Name Table (v27, 28 September 2013)"
fid.cdm_data_type = "grid"
fid.keywords = "EARTH SCIENCE > SPECTRAL/ENGINEERING > MICROWAVE > BRIGHTNESS TEMPERATURE"
fid.keywords_vocabulary = "NASA Global Change Master Directory (GCMD) Earth Science Keywords, Version 8.1"
fid.platform = "TBD"
fid.sensor = "TBD"
fid.naming_authority = "org.doi.dx"
fid.id = "10.5067/MEASURES/CRYOSPHERE/nsidc-0630.001"
fid.date_created = "TBD"
fid.acknowledgement = ["This data set was created with funding from NASA MEaSUREs Grant #NNX13AI23A.\n",
"Data archiving and distribution is supported by the NASA NSIDC Distributed Active Archive Center (DAAC)."]
fid.license = "No constraints on data access or use"
fid.processing_level = "Level 3"
fid.creator_name = "Mary J. Brodzik"
fid.creator_email = "[email protected]"
fid.creator_url = "http://nsidc.org/charis"
fid.contributor_name = "T. H. Painter, M. J. Brodzik, R. L. Armstrong"
fid.contributor_role = "Principal Investigator, Co-Investigator, Co-Investigator"
fid.citation = ["Brodzik, M. J., D. G. Long, M. A. Hardman, A. C. Paget. 2015.\n",
"MEaSUREs Calibrated Passive Microwave Daily EASE-Grid 2.0 Brightness Temperature ESDR.\n",
"Version 0.01.\n",
"[Indicate subset used].\n",
"Boulder, Colorado USA: NASA DAAC at the National Snow and Ice Data Center." ]
| {
"content_hash": "90c4243e6c56ad1e8a00f9f56da11076",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 234,
"avg_line_length": 56.775862068965516,
"alnum_prop": 0.698451260249013,
"repo_name": "mjbrodzik/ipython_notebooks",
"id": "e3fcfcaabac86a0213b86b5859e734bfd4a0d2f2",
"size": "3608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "charis/make_MODICEv04_min05yr_netcdf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "42256555"
},
{
"name": "MATLAB",
"bytes": "3333"
},
{
"name": "Python",
"bytes": "68180"
},
{
"name": "TeX",
"bytes": "87727"
}
],
"symlink_target": ""
} |
import os, sys
import numpy as np
from optparse import OptionParser
from py_paddle import swig_paddle, DataProviderConverter
from paddle.trainer.PyDataProvider2 import integer_value_sequence
from paddle.trainer.config_parser import parse_config
"""
Usage: run following command to show help message.
python predict.py -h
"""
class SentimentPrediction():
def __init__(self, train_conf, dict_file, model_dir=None, label_file=None):
"""
train_conf: trainer configure.
dict_file: word dictionary file name.
model_dir: directory of model.
"""
self.train_conf = train_conf
self.dict_file = dict_file
self.word_dict = {}
self.dict_dim = self.load_dict()
self.model_dir = model_dir
if model_dir is None:
self.model_dir = os.path.dirname(train_conf)
self.label = None
if label_file is not None:
self.load_label(label_file)
conf = parse_config(train_conf, "is_predict=1")
self.network = swig_paddle.GradientMachine.createFromConfigProto(
conf.model_config)
self.network.loadParameters(self.model_dir)
input_types = [integer_value_sequence(self.dict_dim)]
self.converter = DataProviderConverter(input_types)
def load_dict(self):
"""
Load dictionary from self.dict_file.
"""
for line_count, line in enumerate(open(self.dict_file, 'r')):
self.word_dict[line.strip().split('\t')[0]] = line_count
return len(self.word_dict)
def load_label(self, label_file):
"""
Load label.
"""
self.label = {}
for v in open(label_file, 'r'):
self.label[int(v.split('\t')[1])] = v.split('\t')[0]
def get_index(self, data):
"""
transform word into integer index according to the dictionary.
"""
words = data.strip().split()
word_slot = [self.word_dict[w] for w in words if w in self.word_dict]
return word_slot
def batch_predict(self, data_batch):
input = self.converter(data_batch)
output = self.network.forwardTest(input)
prob = output[0]["value"]
labs = np.argsort(-prob)
for idx, lab in enumerate(labs):
if self.label is None:
print("predicting label is %d" % (lab[0]))
else:
print("predicting label is %s" % (self.label[lab[0]]))
def option_parser():
usage = "python predict.py -n config -w model_dir -d dictionary -i input_file "
parser = OptionParser(usage="usage: %s [options]" % usage)
parser.add_option(
"-n",
"--tconf",
action="store",
dest="train_conf",
help="network config")
parser.add_option(
"-d",
"--dict",
action="store",
dest="dict_file",
help="dictionary file")
parser.add_option(
"-b",
"--label",
action="store",
dest="label",
default=None,
help="dictionary file")
parser.add_option(
"-c",
"--batch_size",
type="int",
action="store",
dest="batch_size",
default=1,
help="the batch size for prediction")
parser.add_option(
"-w",
"--model",
action="store",
dest="model_path",
default=None,
help="model path")
return parser.parse_args()
def main():
options, args = option_parser()
train_conf = options.train_conf
batch_size = options.batch_size
dict_file = options.dict_file
model_path = options.model_path
label = options.label
swig_paddle.initPaddle("--use_gpu=0")
predict = SentimentPrediction(train_conf, dict_file, model_path, label)
batch = []
for line in sys.stdin:
words = predict.get_index(line)
if words:
batch.append([words])
else:
print('All the words in [%s] are not in the dictionary.' % line)
if len(batch) == batch_size:
predict.batch_predict(batch)
batch = []
if len(batch) > 0:
predict.batch_predict(batch)
if __name__ == '__main__':
main()
| {
"content_hash": "01f0b8deef2a75566a0bf0922e5c20df",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 83,
"avg_line_length": 30.057142857142857,
"alnum_prop": 0.5715304182509505,
"repo_name": "livc/Paddle",
"id": "64c78e0d6b9297e7a321a4f070517593b0bfe332",
"size": "4818",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "demo/sentiment/predict.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "226899"
},
{
"name": "C++",
"bytes": "3146268"
},
{
"name": "CMake",
"bytes": "121374"
},
{
"name": "CSS",
"bytes": "21730"
},
{
"name": "Cuda",
"bytes": "497938"
},
{
"name": "HTML",
"bytes": "9018"
},
{
"name": "JavaScript",
"bytes": "1025"
},
{
"name": "Perl",
"bytes": "11452"
},
{
"name": "Protocol Buffer",
"bytes": "43517"
},
{
"name": "Python",
"bytes": "995181"
},
{
"name": "Shell",
"bytes": "109514"
}
],
"symlink_target": ""
} |
import pytest
from ipaqe_dyndir.builtin.repos import (
UpdatesTestingRepositoryPlugin,
COPRPlugin
)
from ipaqe_dyndir.plugin.base import PluginConfigError
test_host = dict(name='test.example.com', role='master')
host_updates_enabled = {'enable_updates_testing': True}
host_updates_disabled = {'enable_updates_testing': False}
test_data_updates_testing = [
(True, test_host, host_updates_enabled),
(False, test_host, host_updates_disabled)
]
test_copr_freeipa = {'copr_repositories': ['@freeipa/freeipa-master']}
test_data_copr = [
([], test_host, {}), # if empty list, do not generate the variable
(tuple(), test_host, {}), # take also immutables
(['@freeipa/freeipa-master'], test_host, test_copr_freeipa),
(('@freeipa/freeipa-master',), test_host, test_copr_freeipa)
]
def test_updates_testing_invalid_config():
with pytest.raises(PluginConfigError):
UpdatesTestingRepositoryPlugin({})
@pytest.mark.parametrize(
'conf,host,exp_res', test_data_updates_testing)
def test_updates_config_configured(conf, host, exp_res):
pl = UpdatesTestingRepositoryPlugin(conf)
res = pl(host)
assert (
res['enable_updates_testing'] == exp_res['enable_updates_testing'])
def test_copr_plugin_invalid_config():
with pytest.raises(PluginConfigError):
COPRPlugin({})
@pytest.mark.parametrize(
'conf,host,exp_res', test_data_copr)
def test_copr_repo_configs(conf, host, exp_res):
pl = COPRPlugin(conf)
res = pl(host)
if not exp_res:
assert not res
else:
assert (
res['copr_repositories'] == exp_res['copr_repositories'])
| {
"content_hash": "21deeebd6401ec601a8f7add915de7b0",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 27.88135593220339,
"alnum_prop": 0.6802431610942249,
"repo_name": "apophys/ipaqe-dyndir",
"id": "fbd4e6e0f17a7101e2812bb4d7632ae2e17745b1",
"size": "1684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_builtin_plugins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22942"
}
],
"symlink_target": ""
} |
from functools import partial
import pytest
from plenum.common.util import getNoInstances
from stp_core.common.util import adict
from plenum.test import waits
from plenum.test.malicious_behaviors_node import makeNodeFaulty, \
delaysPrePrepareProcessing, \
changesRequest
from plenum.test.node_request.node_request_helper import checkPrePrepared
nodeCount = 7
# f + 1 faults, i.e, num of faults greater than system can tolerate
faultyNodes = 3
whitelist = ['InvalidSignature',
'cannot process incoming PREPARE']
delayPrePrepareSec = 60
@pytest.fixture(scope="module")
def setup(txnPoolNodeSet):
# Making nodes faulty such that no primary is chosen
E = txnPoolNodeSet[-3]
G = txnPoolNodeSet[-2]
Z = txnPoolNodeSet[-1]
for node in E, G, Z:
makeNodeFaulty(node,
changesRequest, partial(delaysPrePrepareProcessing,
delay=delayPrePrepareSec))
return adict(faulties=(E, G, Z))
@pytest.fixture(scope="module")
def afterElection(setup):
for n in setup.faulties:
for r in n.replicas.values():
assert not r.isPrimary
@pytest.fixture(scope="module")
def preprepared1WithDelay(looper, txnPoolNodeSet, propagated1, faultyNodes):
timeouts = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + delayPrePrepareSec
checkPrePrepared(looper,
txnPoolNodeSet,
propagated1,
range(getNoInstances(len(txnPoolNodeSet))),
faultyNodes,
timeout=timeouts)
def testNumOfPrepareWithFPlusOneFaults(
afterElection, noRetryReq, preprepared1WithDelay):
pass
| {
"content_hash": "105e4b5d9c44cf4dedb5d61edd8beb47",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 85,
"avg_line_length": 31.574074074074073,
"alnum_prop": 0.6774193548387096,
"repo_name": "evernym/zeno",
"id": "cd871a536885c5fe4239dbbd5379ee901e9de2a7",
"size": "1705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/node_request/test_prepare/test_num_of_prepare_with_f_plus_one_faults.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "531061"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0033_remove_golive_expiry_help_text'),
]
operations = [
migrations.CreateModel(
name='StreamfieldsSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('collapse_streamfields', models.BooleanField(default=True)),
('pre_selected_colors', models.TextField(blank=True, help_text="Gescheiden kleuren met een ';' (max. 7)", verbose_name='Pre-kleuren')),
('google_api_key', models.CharField(help_text='API Key van Google', max_length=255)),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
],
options={
'abstract': False,
},
),
]
| {
"content_hash": "0d646b44c55be56f10b053f8d23e0dd2",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 151,
"avg_line_length": 36.724137931034484,
"alnum_prop": 0.6028169014084507,
"repo_name": "UWKM/uwkm_streamfields",
"id": "33b00ba1b482e4c47df32e5b8ecc0d5e69b6d460",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uwkm_streamfields/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "741695"
},
{
"name": "HTML",
"bytes": "26682"
},
{
"name": "JavaScript",
"bytes": "41724"
},
{
"name": "Python",
"bytes": "40995"
}
],
"symlink_target": ""
} |
from feedin.engine import Engine
from feedin.model import Context
from elasticsearch import Elasticsearch
import time
import datetime
import uuid
if __name__ == '__main__':
setting_file = 'sina.xml'
engine = Engine()
es = Elasticsearch()
job = engine.create(setting_file)
page = 0
while True:
page = page + 1
context = Context()
job.execute(page, context)
for item in context.items:
item_body = item
if not 'id' in item_body:
item_body['id'] = str(uuid.uuid4())
timestamp = datetime.datetime.now()
item_body['ts'] = timestamp
es.index(index='sina', doc_type='zt', id=item_body['id'], body=item_body)
print item_body['url']
print item_body['public']
time.sleep(2) | {
"content_hash": "e6defdf678d031e9665924997c939673",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 85,
"avg_line_length": 30.77777777777778,
"alnum_prop": 0.5752105896510229,
"repo_name": "lyssym/crawler",
"id": "5d384abcf1f98a8968119581838f175282a2f9d4",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sina_finance_zt/start.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "106297"
},
{
"name": "Python",
"bytes": "71751"
}
],
"symlink_target": ""
} |
import os
import webob.dec
import webob.exc
from traffic.api.openstack import wsgi
from traffic import context
from traffic import flags
from traffic.openstack.common import log as logging
from traffic import wsgi as base_wsgi
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DECLARE('use_forwarded_for', 'traffic.api.auth')
class NoAuthMiddleware(base_wsgi.Middleware):
"""Return a fake token if one isn't specified."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if 'X-Auth-Token' not in req.headers:
user_id = req.headers.get('X-Auth-User', 'admin')
project_id = req.headers.get('X-Auth-Project-Id', 'admin')
os_url = os.path.join(req.url, project_id)
res = webob.Response()
# NOTE(vish): This is expecting and returning Auth(1.1), whereas
# keystone uses 2.0 auth. We should probably allow
# 2.0 auth here as well.
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
res.headers['X-Server-Management-Url'] = os_url
res.content_type = 'text/plain'
res.status = '204'
return res
token = req.headers['X-Auth-Token']
user_id, _sep, project_id = token.partition(':')
project_id = project_id or user_id
remote_address = getattr(req, 'remote_address', '127.0.0.1')
if FLAGS.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
is_admin=True,
remote_address=remote_address)
req.environ['traffic.context'] = ctx
return self.application
| {
"content_hash": "a30bdb1bc073640fc6609916e51531ad",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 38.458333333333336,
"alnum_prop": 0.5861321776814734,
"repo_name": "fengkaicnic/traffic",
"id": "3d7b9483f704c04e1dd46c11bface68353f2afa0",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traffic/api/openstack/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1097992"
},
{
"name": "Shell",
"bytes": "8878"
}
],
"symlink_target": ""
} |
import os
from slackly import SlackClient, SlackAPI, SlackEventParsed
from slackly.client.response_factory import SlackAPIObjectResponse
import slackly.schema.endpoints
def main():
client = SlackClient(
token=os.environ['SLACK_TOKEN'],
response_factory=SlackAPIObjectResponse,
)
slack = SlackAPI(bind=client)
if __name__ == '__main__':
main()
| {
"content_hash": "57bf0769c44a7cfe880cb4733e8fd175",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 23.6875,
"alnum_prop": 0.712401055408971,
"repo_name": "huntcsg/slackly",
"id": "3d31b9731e11e710804b05da4b098b39742d5dbe",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/slackly/examples/start_here.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "362475"
},
{
"name": "Shell",
"bytes": "1497"
}
],
"symlink_target": ""
} |
import json
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404, render, redirect
from django.views import generic
from .forms import *
from .models import *
from django.contrib.auth.models import User, Group
from foraliving.recording import *
# Create your views here.
def index(request):
if request.user.is_authenticated():
return redirect('videos')
else:
videoDemo = Video.objects.filter(tags__contains='homepage', tags__icontains='demo')
videos = Video.objects.filter(tags__contains='homepage', ).exclude(tags__icontains='demo')
return render(request, 'homepage.html', {'videos': videos, 'videoDemo': videoDemo,})
def twopage(request):
return render(request, 'home.html')
def sitetheme(request):
return render(request, 'FAL_Theme.html')
def interviewSetup(request):
return render(request, 'interview_setup.html')
def record(request):
return render(request, 'record.html')
class VolunteerForm(generic.View):
"""
Class to create new volunters on the sysmtem
"""
volunteer_view = 'volunteer/volunteer_signup.html'
login_view = login_url = settings.LOGIN_URL
def get(self, request):
"""
Method to render the template
:param request:
:return:
"""
email = request.GET.get('email')
phone = request.GET.get('phone')
last_name = request.GET.get('last_name')
first_name = request.GET.get('first_name')
workTitle = request.GET.get('workTitle')
userForm = volunteerUserSignupForm()
infoForm = volunteerSignupForm()
return render(request, self.volunteer_view,
{'userForm': userForm, 'infoForm': infoForm, 'email': email, 'phone': phone, 'first_name': first_name, 'last_name': last_name, 'workTitle': workTitle})
def post(self, request):
"""
Method to save the data from the volunteer form
:param request:
:return:
"""
if request.method == 'POST':
userForm = volunteerUserSignupForm(request.POST)
infoForm = volunteerSignupForm(request.POST)
if not userForm.is_valid() or not infoForm.is_valid():
return render(request, self.volunteer_view,
{'userForm': userForm, 'infoForm': infoForm})
if userForm.is_valid():
newUser = userForm.save(commit=False)
if infoForm.is_valid():
newVolunteer = infoForm.save(commit=False)
#encrypted password
newUser.set_password(newUser.password)
newUser.save()
type = Type.objects.get(name='Volunteer')
user_type = User_Type.objects.create(type=type, user=newUser)
user_type.save()
newVolunteer.user = User.objects.get(username=newUser.username)
newVolunteer.save()
return HttpResponse(newVolunteer.id)
def uniqueEmail(request):
"""
Method to validate if the email exist on the system
:param request:
:return:
"""
if request.is_ajax():
email = request.GET.get('email')
count_user = (User.objects.filter(email__iexact=email).count())
if count_user >= 1:
return HttpResponse('true')
else:
return HttpResponse('false')
def uniqueUsername(request):
"""
Method to validate if the username exist on the system
:param request:
:return:
"""
if request.is_ajax():
username = request.GET.get('username')
count_user = (User.objects.filter(username__iexact=username).count())
if count_user >= 1:
return HttpResponse('true')
else:
return HttpResponse('false')
def categories(request):
"""
Method to get the skills saved on the system
:param request:
:return:
"""
if request.method == 'GET':
skill = Skill.objects.all()
new_skill = []
for data in skill:
new_skill.append(data.name)
return HttpResponse(json.dumps(new_skill))
def interests(request):
"""
Method to get the interests saved on the system
:param request:
:return:
"""
if request.method == 'GET':
interest = Interest.objects.all()
new_interest = []
for data in interest:
new_interest.append(data.name)
return HttpResponse(json.dumps(new_interest))
def createSkill(request, volunteer_id):
"""
Method to create skills and interests
:param request:
:param volunteer_id:
:return:
"""
if request.method == 'POST':
volunteer = Volunteer_User_Add_Ons.objects.get(pk=volunteer_id)
skills = request.POST.getlist('skills')
interests = request.POST.getlist('interests')
# call to create the skills
createInputToken(request, skills, 'Skill', volunteer_id)
# call to create the interests
createInputToken(request, interests, 'Interest', volunteer_id)
return HttpResponse('ok')
def createInputToken(request, dataInput, model, volunteer_id):
"""
Method to create the skills or interests
:param request:
:param data:
:param model:
:param relation:
:return:
"""
volunteer = Volunteer_User_Add_Ons.objects.get(pk=volunteer_id)
# for loop to list of the Skills
for data in dataInput:
data = json.loads(data)
# for loop to the array objects of the Skill input
for field in data:
# for loop to objects with the values of the skills
for key, value in field.items():
if key == "value":
try:
# try get the skill or interest with the name saved in the variable "value"
if model == "Skill":
variable = Skill.objects.get(name__iexact=value)
else:
variable = Interest.objects.get(name__iexact=value)
except:
# if the object doesn't exist, the Skill or interest object is saved
if model == "Skill":
variable = Skill(name=value)
else:
variable = Interest(name=value)
variable.save()
# Add the skill or interest object to the skills or interests relation
if model == "Skill":
volunteer.skills.add(variable)
else:
volunteer.interests.add(variable)
| {
"content_hash": "232e5e617289f0f04bdccbaa81670ce2",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 173,
"avg_line_length": 32.83574879227053,
"alnum_prop": 0.5848168309548331,
"repo_name": "foraliving/pilot",
"id": "a31a70e71980ae4bd42bc31a6c9813d4ca660bbe",
"size": "6797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foraliving/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1049"
},
{
"name": "CSS",
"bytes": "215607"
},
{
"name": "HTML",
"bytes": "293009"
},
{
"name": "JavaScript",
"bytes": "636123"
},
{
"name": "Python",
"bytes": "176766"
}
],
"symlink_target": ""
} |
from plone.indexer import indexer
from zope.interface import Interface
import plone.api
@indexer(Interface)
def customer_role(obj):
"""Index users and groups with ``Customer`` role directly on the context.
Don't index inherited `Customer` role. Groups are prefixed with ``group:``
"""
users = obj.users_with_local_role('Customer') # get non-aquired roles
ret = [plone.api.group.get(it) and 'group:%s' % it or it for it in users]
return ret
| {
"content_hash": "2468f0a56e7eb068c09d7b6b538368c9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 33.5,
"alnum_prop": 0.7014925373134329,
"repo_name": "TheVirtualLtd/bda.plone.orders",
"id": "4ca2a589a3826e22fcf54987c0b1f250a2e85469",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bda/plone/orders/indexer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24454"
},
{
"name": "JavaScript",
"bytes": "54214"
},
{
"name": "Python",
"bytes": "218553"
},
{
"name": "Shell",
"bytes": "2972"
}
],
"symlink_target": ""
} |
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.87.0"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
| {
"content_hash": "e6405db42184a48999d7295f07f158c0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 92,
"avg_line_length": 43.2,
"alnum_prop": 0.8277777777777777,
"repo_name": "tiangolo/fastapi",
"id": "afdc94874c4f38e59302e74893d35fa8e1640836",
"size": "1080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fastapi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
} |
""" This file is a modified version of rlcompleter.py from the Python
project under the Python Software Foundation License 2:
https://github.com/python/cpython/blob/master/Lib/rlcompleter.py
https://github.com/python/cpython/blob/master/LICENSE
The only changes made were to modify the regular expression in attr_matches
and all code that relied on GNU readline (the later more for readability as
it wasn't required).
--------------
Word completion for GNU readline.
The completer completes keywords, built-ins and globals in a selectable
namespace (which defaults to __main__); when completing NAME.NAME..., it
evaluates (!) the expression up to the last dot and completes its attributes.
It's very cool to do "import sys" type "sys.", hit the completion key (twice),
and see the list of names defined by the sys module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and generally cause
the completion to fail). This is a feature -- since readline sets the tty
device in raw (or cbreak) mode, printing a traceback wouldn't work well
without some complicated hoopla to save, reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary application
defined code to be executed if an object with a __getattr__ hook is found.
Since it is the responsibility of the application (or the user) to enable this
feature, I consider this an acceptable risk. More complicated expressions
(e.g. function calls or indexing operations) are *not* evaluated.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
import atexit
import __main__
import inspect
import sys
from typing import Optional
__all__ = ["Completer"]
def fnsignature(obj):
try:
sig = str(inspect.signature(obj))
except:
sig = "()"
return sig
class Completer:
def __init__(self, namespace=None):
"""Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
if namespace and not isinstance(namespace, dict):
raise TypeError('namespace must be a dictionary')
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
def complete(self, text: str, state) -> Optional[str]:
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if not text.strip():
if state == 0:
return '\t'
else:
return None
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def _callable_postfix(self, val, word):
if callable(val) and not inspect.isclass(val):
word = word + fnsignature(val)
return word
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
import keyword
matches = []
seen = {"__builtins__"}
n = len(text)
for word in keyword.kwlist:
if word[:n] == text:
seen.add(word)
if word in {'finally', 'try'}:
word = word + ':'
elif word not in {'False', 'None', 'True', 'break', 'continue', 'pass', 'else'}:
word = word + ' '
matches.append(word)
#Not sure why in the console builtins becomes a dict but this works for now.
if hasattr(__builtins__, '__dict__'): # type: ignore # remove this ignore > pyright 1.1.149
builtins = __builtins__.__dict__ # type: ignore # remove this ignore > pyright 1.1.149
else:
builtins = __builtins__ # type: ignore # remove this ignore > pyright 1.1.149
for nspace in [self.namespace, builtins]:
for word, val in nspace.items():
if word[:n] == text and word not in seen:
seen.add(word)
matches.append(self._callable_postfix(val, word))
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"([\w\[\]]+(\.[\w\[\]]+)*)\.([\w\[\]]*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = set(dir(thisobject))
words.discard("__builtins__")
if hasattr(thisobject, '__class__'):
words.add('__class__')
words.update(get_class_members(thisobject.__class__))
matches = []
n = len(attr)
if attr == '':
noprefix = '_'
elif attr == '_':
noprefix = '__'
else:
noprefix = None
while True:
for word in words:
if (word[:n] == attr and not (noprefix and word[:n + 1] == noprefix)):
match = f"{expr}.{word}"
try:
val = inspect.getattr_static(thisobject, word)
except Exception:
pass # Include even if attribute not set
else:
match = self._callable_postfix(val, match)
matches.append(match)
if matches or not noprefix:
break
if noprefix == '_':
noprefix = '__'
else:
noprefix = None
matches.sort()
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass, '__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
| {
"content_hash": "502b6fd31aeff25d5a10c1892adcdd6c",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 94,
"avg_line_length": 30.281690140845072,
"alnum_prop": 0.6789147286821705,
"repo_name": "Vector35/binaryninja-api",
"id": "9a33659b1d0e9083425bad3ae14fac62797cd7a7",
"size": "6450",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "python/bncompleter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7858"
},
{
"name": "C",
"bytes": "34178"
},
{
"name": "C++",
"bytes": "3321511"
},
{
"name": "CMake",
"bytes": "18669"
},
{
"name": "CSS",
"bytes": "212519"
},
{
"name": "HTML",
"bytes": "23547"
},
{
"name": "JavaScript",
"bytes": "16727"
},
{
"name": "Makefile",
"bytes": "8363"
},
{
"name": "Python",
"bytes": "2299592"
},
{
"name": "Rust",
"bytes": "597644"
},
{
"name": "Shell",
"bytes": "5702"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'r6sapi.py'
copyright = '2016, Billyoyo'
author = 'Billyoyo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.4.1'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
rst_prolog = """
.. |coro| replace:: This function is a |corourl|_.
.. |corourl| replace:: *coroutine*
.. _corourl: https://docs.python.org/3/library/asyncio-task.html#coroutine
"""
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'r6sapipydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'r6sapipy.tex', 'r6sapi.py Documentation',
'Billyoyo', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'r6sapipy', 'r6sapi.py Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'r6sapipy', 'r6sapi.py Documentation',
author, 'r6sapipy', 'One line description of project.',
'Miscellaneous'),
]
| {
"content_hash": "cac74b8544d8c8149cefb07018cd24a5",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 78,
"avg_line_length": 29.047619047619047,
"alnum_prop": 0.660655737704918,
"repo_name": "billy-yoyo/RainbowSixSiege-Python-API",
"id": "1c7f3cbb2b08d8da59b9a61293e27b49ac45a100",
"size": "4955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86813"
}
],
"symlink_target": ""
} |
import socket
from unittest import mock
import pytest
from aiohttp.tcp_helpers import CORK, tcp_cork, tcp_nodelay
has_ipv6 = socket.has_ipv6
if has_ipv6:
# The socket.has_ipv6 flag may be True if Python was built with IPv6
# support, but the target system still may not have it.
# So let's ensure that we really have IPv6 support.
try:
socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
except OSError:
has_ipv6 = False
# nodelay
def test_tcp_nodelay_exception():
transport = mock.Mock()
s = mock.Mock()
s.setsockopt = mock.Mock()
s.family = socket.AF_INET
s.setsockopt.side_effect = OSError
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
s.setsockopt.assert_called_with(
socket.IPPROTO_TCP,
socket.TCP_NODELAY,
True
)
def test_tcp_nodelay_enable():
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_tcp_nodelay_enable_and_disable():
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
tcp_nodelay(transport, False)
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
@pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
def test_tcp_nodelay_enable_ipv6():
transport = mock.Mock()
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
reason="requires unix sockets")
def test_tcp_nodelay_enable_unix():
# do not set nodelay for unix socket
transport = mock.Mock()
s = mock.Mock(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert not s.setsockopt.called
def test_tcp_nodelay_enable_no_socket():
transport = mock.Mock()
transport.get_extra_info.return_value = None
tcp_nodelay(transport, True)
# cork
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_tcp_cork_enable():
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_and_disable():
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
tcp_cork(transport, False)
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_ipv6():
transport = mock.Mock()
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
reason="requires unix sockets")
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_unix():
transport = mock.Mock()
s = mock.Mock(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
assert not s.setsockopt.called
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_no_socket():
transport = mock.Mock()
transport.get_extra_info.return_value = None
tcp_cork(transport, True)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_exception():
transport = mock.Mock()
s = mock.Mock()
s.setsockopt = mock.Mock()
s.family = socket.AF_INET
s.setsockopt.side_effect = OSError
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
s.setsockopt.assert_called_with(
socket.IPPROTO_TCP,
CORK,
True
)
| {
"content_hash": "c4b3467b1a337874de0c815aefa07967",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 75,
"avg_line_length": 32.4,
"alnum_prop": 0.6828437633035335,
"repo_name": "rutsky/aiohttp",
"id": "70f7357a01800c5ed5166d84cae6755fe96808d9",
"size": "4698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tcp_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "C",
"bytes": "187294"
},
{
"name": "Gherkin",
"bytes": "266"
},
{
"name": "Makefile",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "1487288"
},
{
"name": "Shell",
"bytes": "2877"
}
],
"symlink_target": ""
} |
from ccxt.async.base.exchange import Exchange
import base64
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import InvalidOrder
class kucoin (Exchange):
def describe(self):
return self.deep_extend(super(kucoin, self).describe(), {
'id': 'kucoin',
'name': 'Kucoin',
'countries': 'HK', # Hong Kong
'version': 'v1',
'rateLimit': 2000,
'hasCORS': False,
'userAgent': self.userAgents['chrome'],
# obsolete metainfo interface
'hasFetchTickers': True,
'hasFetchOHLCV': False, # see the method implementation below
'hasFetchOrder': True,
'hasFetchOrders': True,
'hasFetchClosedOrders': True,
'hasFetchOpenOrders': True,
'hasFetchMyTrades': False,
'hasFetchCurrencies': True,
'hasWithdraw': True,
# new metainfo interface
'has': {
'fetchTickers': True,
'fetchOHLCV': True, # see the method implementation below
'fetchOrder': True,
'fetchOrders': True,
'fetchClosedOrders': True,
'fetchOpenOrders': True,
'fetchMyTrades': False,
'fetchCurrencies': True,
'withdraw': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'8h': '480',
'1d': 'D',
'1w': 'W',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/33795655-b3c46e48-dcf6-11e7-8abe-dc4588ba7901.jpg',
'api': 'https://api.kucoin.com',
'www': 'https://kucoin.com',
'doc': 'https://kucoinapidocs.docs.apiary.io',
'fees': 'https://news.kucoin.com/en/fee',
},
'api': {
'public': {
'get': [
'open/chart/config',
'open/chart/history',
'open/chart/symbol',
'open/currencies',
'open/deal-orders',
'open/kline',
'open/lang-list',
'open/orders',
'open/orders-buy',
'open/orders-sell',
'open/tick',
'market/open/coin-info',
'market/open/coins',
'market/open/coins-trending',
'market/open/symbols',
],
},
'private': {
'get': [
'account/balance',
'account/{coin}/wallet/address',
'account/{coin}/wallet/records',
'account/{coin}/balance',
'account/promotion/info',
'account/promotion/sum',
'deal-orders',
'order/active',
'order/active-map',
'order/dealt',
'referrer/descendant/count',
'user/info',
],
'post': [
'account/{coin}/withdraw/apply',
'account/{coin}/withdraw/cancel',
'cancel-order',
'order',
'user/change-lang',
],
},
},
'fees': {
'trading': {
'maker': 0.0010,
'taker': 0.0010,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'KCS': 2.0,
'BTC': 0.0005,
'USDT': 10.0,
'ETH': 0.01,
'LTC': 0.001,
'NEO': 0.0,
'GAS': 0.0,
'KNC': 0.5,
'BTM': 5.0,
'QTUM': 0.1,
'EOS': 0.5,
'CVC': 3.0,
'OMG': 0.1,
'PAY': 0.5,
'SNT': 20.0,
'BHC': 1.0,
'HSR': 0.01,
'WTC': 0.1,
'VEN': 2.0,
'MTH': 10.0,
'RPX': 1.0,
'REQ': 20.0,
'EVX': 0.5,
'MOD': 0.5,
'NEBL': 0.1,
'DGB': 0.5,
'CAG': 2.0,
'CFD': 0.5,
'RDN': 0.5,
'UKG': 5.0,
'BCPT': 5.0,
'PPT': 0.1,
'BCH': 0.0005,
'STX': 2.0,
'NULS': 1.0,
'GVT': 0.1,
'HST': 2.0,
'PURA': 0.5,
'SUB': 2.0,
'QSP': 5.0,
'POWR': 1.0,
'FLIXX': 10.0,
'LEND': 20.0,
'AMB': 3.0,
'RHOC': 2.0,
'R': 2.0,
'DENT': 50.0,
'DRGN': 1.0,
'ACT': 0.1,
},
'deposit': 0.00,
},
},
})
async def fetch_markets(self):
response = await self.publicGetMarketOpenSymbols()
markets = response['data']
result = []
for i in range(0, len(markets)):
market = markets[i]
id = market['symbol']
base = market['coinType']
quote = market['coinTypePair']
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': 8,
'price': 8,
}
active = market['trading']
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': active,
'info': market,
'lot': math.pow(10, -precision['amount']),
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': None,
},
'price': {
'min': None,
'max': None,
},
},
}))
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetMarketOpenCoins(params)
currencies = response['data']
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = currency['coin']
# todo: will need to rethink the fees
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
code = self.common_currency_code(id)
precision = currency['tradePrecision']
deposit = currency['enableDeposit']
withdraw = currency['enableWithdraw']
active = (deposit and withdraw)
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': currency['name'],
'active': active,
'status': 'ok',
'fee': currency['withdrawFeeRate'], # todo: redesign
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': currency['withdrawMinAmount'],
'max': math.pow(10, precision),
},
},
}
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetAccountBalance(self.extend({
'limit': 20, # default 12, max 20
'page': 1,
}, params))
balances = response['data']
result = {'info': balances}
indexed = self.index_by(balances, 'coinType')
keys = list(indexed.keys())
for i in range(0, len(keys)):
id = keys[i]
currency = self.common_currency_code(id)
account = self.account()
balance = indexed[id]
used = float(balance['freezeBalance'])
free = float(balance['balance'])
total = self.sum(free, used)
account['free'] = free
account['used'] = used
account['total'] = total
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetOpenOrders(self.extend({
'symbol': market['id'],
}, params))
orderbook = response['data']
return self.parse_order_book(orderbook, None, 'BUY', 'SELL')
def parse_order(self, order, market=None):
symbol = None
if market:
symbol = market['symbol']
else:
symbol = order['coinType'] + '/' + order['coinTypePair']
timestamp = order['createdAt']
price = order['price']
filled = order['dealAmount']
remaining = order['pendingAmount']
amount = self.sum(filled, remaining)
side = order['direction'].lower()
result = {
'info': order,
'id': self.safe_string(order, 'oid'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'amount': amount,
'cost': price * filled,
'filled': filled,
'remaining': remaining,
'status': None,
'fee': self.safe_float(order, 'fee'),
}
return result
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if not symbol:
raise ExchangeError(self.id + ' fetchOpenOrders requires a symbol param')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.privateGetOrderActiveMap(self.extend(request, params))
orders = self.array_concat(response['data']['SELL'], response['data']['BUY'])
return self.parse_orders(orders, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {}
await self.load_markets()
market = self.market(symbol)
if symbol:
request['symbol'] = market['id']
if since:
request['since'] = since
if limit:
request['limit'] = limit
response = await self.privateGetOrderDealt(self.extend(request, params))
return self.parse_orders(response['data']['datas'], market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
market = self.market(symbol)
base = market['base']
order = {
'symbol': market['id'],
'type': side.upper(),
'price': self.price_to_precision(symbol, price),
'amount': self.truncate(amount, self.currencies[base]['precision']),
}
response = await self.privatePostOrder(self.extend(order, params))
return {
'info': response,
'id': self.safe_string(response['data'], 'orderOid'),
}
async def cancel_order(self, id, symbol=None, params={}):
if not symbol:
raise ExchangeError(self.id + ' cancelOrder requires symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'orderOid': id,
}
if 'type' in params:
request['type'] = params['type'].upper()
else:
raise ExchangeError(self.id + ' cancelOrder requires type(BUY or SELL) param')
response = await self.privatePostCancelOrder(self.extend(request, params))
return response
def parse_ticker(self, ticker, market=None):
timestamp = ticker['datetime']
symbol = None
if market:
symbol = market['symbol']
else:
symbol = ticker['coinType'] + '/' + ticker['coinTypePair']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'ask': self.safe_float(ticker, 'sell'),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': self.safe_float(ticker, 'lastDealPrice'),
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': self.safe_float(ticker, 'volValue'),
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
response = await self.publicGetMarketOpenSymbols(params)
tickers = response['data']
result = {}
for t in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[t])
symbol = ticker['symbol']
result[symbol] = ticker
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetOpenTick(self.extend({
'symbol': market['id'],
}, params))
ticker = response['data']
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
timestamp = trade[0]
side = None
if trade[1] == 'BUY':
side = 'buy'
elif trade[1] == 'SELL':
side = 'sell'
return {
'id': None,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': 'limit',
'side': side,
'price': trade[2],
'amount': trade[3],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetOpenDealOrders(self.extend({
'symbol': market['id'],
}, params))
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1d', since=None, limit=None):
timestamp = self.parse8601(ohlcv['T'])
return [
timestamp,
ohlcv['O'],
ohlcv['H'],
ohlcv['L'],
ohlcv['C'],
ohlcv['V'],
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
to = self.seconds()
request = {
'symbol': market['id'],
'type': self.timeframes[timeframe],
'from': to - 86400,
'to': to,
}
if since:
request['from'] = int(since / 1000)
# limit is not documented in api call, and not respected
if limit:
request['limit'] = limit
response = await self.publicGetOpenChartHistory(self.extend(request, params))
# we need buildOHLCV
return self.parse_ohlcvs(response['data'], market, timeframe, since, limit)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
endpoint = '/' + self.version + '/' + self.implode_params(path, params)
url = self.urls['api'] + endpoint
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
# their nonce is always a calibrated synched milliseconds-timestamp
nonce = self.milliseconds()
queryString = ''
nonce = str(nonce)
if query:
queryString = self.rawencode(self.keysort(query))
url += '?' + queryString
if method != 'GET':
body = queryString
auth = endpoint + '/' + nonce + '/' + queryString
payload = base64.b64encode(self.encode(auth))
# payload should be "encoded" as returned from stringToBase64
signature = self.hmac(payload, self.encode(self.secret), hashlib.sha256)
headers = {
'KC-API-KEY': self.apiKey,
'KC-API-NONCE': nonce,
'KC-API-SIGNATURE': signature,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def throw_exception_or_error_code(self, response):
if 'success' in response:
if not response['success']:
if 'code' in response:
message = self.safe_string(response, 'msg')
if response['code'] == 'UNAUTH':
if message == 'Invalid nonce':
raise InvalidNonce(self.id + ' ' + message)
raise AuthenticationError(self.id + ' ' + self.json(response))
elif response['code'] == 'ERROR':
if message.find('precision of amount') >= 0:
raise InvalidOrder(self.id + ' ' + message)
raise ExchangeError(self.id + ' ' + self.json(response))
def handle_errors(self, code, reason, url, method, headers, body):
if body and(body[0] == "{"):
response = json.loads(body)
self.throw_exception_or_error_code(response)
if code >= 400:
raise ExchangeError(self.id + ' ' + str(code) + ' ' + reason)
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
self.throw_exception_or_error_code(response)
return response
| {
"content_hash": "e8567ea31620a3055a08ef45c5e82b68",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 126,
"avg_line_length": 37.87429643527204,
"alnum_prop": 0.4495962748303364,
"repo_name": "tritoanst/ccxt",
"id": "8ade41163c5d5253c4aeeaf254f10842d1b50050",
"size": "20212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ccxt/async/kucoin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3955653"
},
{
"name": "PHP",
"bytes": "783191"
},
{
"name": "Python",
"bytes": "680573"
},
{
"name": "Shell",
"bytes": "833"
}
],
"symlink_target": ""
} |
"""
Tests of neo.io.stimfitio
"""
import sys
import unittest
from neo.io import StimfitIO
from neo.test.iotest.common_io_test import BaseTestIO
try:
import stfio
except Exception:
HAS_STFIO = False
else:
HAS_STFIO = True
@unittest.skipIf(sys.version_info[0] > 2, "not Python 3 compatible")
@unittest.skipUnless(HAS_STFIO, "requires stfio")
class TestStimfitIO(BaseTestIO, unittest.TestCase):
ioclass = StimfitIO
entities_to_download = [
'stimfit'
]
entities_to_test = [
'stimfit/File_stimfit_1.h5',
'stimfit/File_stimfit_2.h5',
'stimfit/File_stimfit_3.h5',
'stimfit/File_stimfit_4.h5',
'stimfit/File_stimfit_5.h5',
'stimfit/File_stimfit_6.h5',
]
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "43ceffca87b47bd16ded7e603d07904f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 68,
"avg_line_length": 20.763157894736842,
"alnum_prop": 0.6425855513307985,
"repo_name": "apdavison/python-neo",
"id": "c3a1d76c829800840254283a3b1c6e13e5aa0287",
"size": "813",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neo/test/iotest/test_stimfitio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2476868"
}
],
"symlink_target": ""
} |
"""
Structure connectivity class.
"""
import collections
import logging
import networkx as nx
import numpy as np
from monty.json import MSONable, jsanitize
from pymatgen.analysis.chemenv.connectivity.connected_components import (
ConnectedComponent,
)
from pymatgen.analysis.chemenv.connectivity.environment_nodes import (
get_environment_node,
)
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import (
LightStructureEnvironments,
)
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "1.0"
__maintainer__ = "David Waroquiers"
__email__ = "[email protected]"
__date__ = "June 25, 2019"
def get_delta_image(isite1, isite2, data1, data2):
"""
Helper method to get the delta image between one environment and another
from the ligand's delta images.
"""
if data1["start"] == isite1:
if data2["start"] == isite2:
return np.array(data1["delta"]) - np.array(data2["delta"])
return np.array(data1["delta"]) + np.array(data2["delta"])
if data2["start"] == isite2:
return -np.array(data1["delta"]) - np.array(data2["delta"])
return -np.array(data1["delta"]) + np.array(data2["delta"])
class StructureConnectivity(MSONable):
"""
Main class containing the connectivity of a structure.
"""
def __init__(
self,
light_structure_environment,
connectivity_graph=None,
environment_subgraphs=None,
):
"""
Constructor for the StructureConnectivity object.
Args:
light_structure_environment: a LightStructureEnvironments object
containing the relevant local environments
for the sites in the structure.
connectivity_graph: the networkx MultiGraph if it has already been computed,
e.g. stored in a file or dict and StructureConnectivity
is reconstructed from that file or dict.
environment_subgraphs: the different subgraphs of environments that have
been computed if any (as for connectivity_graph, only
if it is reconstructed from a file or dict).
"""
self.light_structure_environments = light_structure_environment
if connectivity_graph is None:
self._graph = nx.MultiGraph()
else:
self._graph = connectivity_graph
if environment_subgraphs is None:
self.environment_subgraphs = {}
else:
self.environment_subgraphs = environment_subgraphs
def environment_subgraph(self, environments_symbols=None, only_atoms=None):
"""
Args:
environments_symbols ():
only_atoms ():
Returns:
"""
if environments_symbols is not None:
self.setup_environment_subgraph(environments_symbols=environments_symbols, only_atoms=only_atoms)
try:
return self._environment_subgraph
except AttributeError:
all_envs = self.light_structure_environments.environments_identified()
self.setup_environment_subgraph(environments_symbols=all_envs, only_atoms=only_atoms)
return self._environment_subgraph
def add_sites(self):
"""
Add the sites in the structure connectivity graph.
"""
self._graph.add_nodes_from(list(range(len(self.light_structure_environments.structure))))
def add_bonds(self, isite, site_neighbors_set):
"""
Add the bonds for a given site index to the structure connectivity graph.
Args:
isite: Index of the site for which the bonds have to be added.
site_neighbors_set: site_neighbors_set: Neighbors set of the site
"""
existing_edges = self._graph.edges(nbunch=[isite], data=True)
for nb_index_and_image in site_neighbors_set.neighb_indices_and_images:
nb_index_unitcell = nb_index_and_image["index"]
nb_image_cell = nb_index_and_image["image_cell"]
exists = False
if np.allclose(nb_image_cell, np.zeros(3)):
for (isite1, ineighb1, data1) in existing_edges:
if np.allclose(data1["delta"], np.zeros(3)) and nb_index_unitcell == ineighb1:
exists = True
break
else:
if isite == nb_index_unitcell:
for (isite1, ineighb1, data1) in existing_edges:
if isite1 == ineighb1:
if np.allclose(data1["delta"], nb_image_cell) or np.allclose(
data1["delta"], -nb_image_cell
):
exists = True
break
else:
for (isite1, ineighb1, data1) in existing_edges:
if nb_index_unitcell == ineighb1:
if data1["start"] == isite:
if np.allclose(data1["delta"], nb_image_cell):
exists = True
break
elif data1["end"] == isite:
if np.allclose(data1["delta"], -nb_image_cell):
exists = True
break
else:
raise ValueError("SHOULD NOT HAPPEN ???")
if not exists:
self._graph.add_edge(
isite,
nb_index_unitcell,
start=isite,
end=nb_index_unitcell,
delta=nb_image_cell,
)
def setup_environment_subgraph(self, environments_symbols, only_atoms=None):
"""
Set up the graph for predefined environments and optionally atoms.
Args:
environments_symbols: Symbols of the environments for the environment subgraph.
only_atoms: Atoms to be considered.
"""
logging.info("Setup of environment subgraph for environments {}".format(", ".join(environments_symbols)))
if not isinstance(environments_symbols, collections.abc.Iterable):
environments_symbols = [environments_symbols]
environments_symbols = sorted(environments_symbols)
envs_string = "-".join(environments_symbols)
if only_atoms is not None:
envs_string += "#" + "-".join(sorted(only_atoms))
# Get it directly if it was already computed
if envs_string in self.environment_subgraphs:
self._environment_subgraph = self.environment_subgraphs[envs_string]
return
# Initialize graph for a subset of environments
self._environment_subgraph = nx.MultiGraph()
# Add the sites with the required environment(s)
for isite, ce_this_site_all in enumerate(self.light_structure_environments.coordination_environments):
if ce_this_site_all is None:
continue
if len(ce_this_site_all) == 0:
continue
ce_this_site = ce_this_site_all[0]["ce_symbol"]
if ce_this_site in environments_symbols:
if only_atoms is None:
env_node = get_environment_node(
self.light_structure_environments.structure[isite],
isite,
ce_this_site,
)
self._environment_subgraph.add_node(env_node)
else:
if self.light_structure_environments.structure.is_ordered:
if self.light_structure_environments.structure[isite].specie.symbol in only_atoms:
env_node = get_environment_node(
self.light_structure_environments.structure[isite],
isite,
ce_this_site,
)
self._environment_subgraph.add_node(env_node)
else:
# TODO: add the possibility of a "constraint" on the minimum percentage
# of the atoms on the site
this_site_elements = [
sp.symbol for sp in self.light_structure_environments.structure[isite].species_and_occu
]
for elem_symbol in this_site_elements:
if elem_symbol in only_atoms:
env_node = get_environment_node(
self.light_structure_environments.structure[isite],
isite,
ce_this_site,
)
self._environment_subgraph.add_node(env_node)
break
# Find the connections between the environments
nodes = list(self._environment_subgraph.nodes())
for inode1, node1 in enumerate(nodes):
isite1 = node1.isite
links_node1 = self._graph.edges(isite1, data=True)
for inode2, node2 in enumerate(nodes[inode1:]):
isite2 = node2.isite
links_node2 = self._graph.edges(isite2, data=True)
# We look for ligands that are common to both site1 and site2
connections_site1_site2 = {}
for (site1_1, ilig_site1, d1) in links_node1:
for (site2_1, ilig_site2, d2) in links_node2:
if ilig_site1 == ilig_site2:
delta_image = get_delta_image(isite1, isite2, d1, d2)
if isite1 == isite2 and np.all(delta_image == 0):
continue
tuple_delta_image = tuple(delta_image)
if tuple_delta_image in connections_site1_site2:
connections_site1_site2[tuple_delta_image].append((ilig_site1, d1, d2))
else:
connections_site1_site2[tuple_delta_image] = [(ilig_site1, d1, d2)]
# Remove the double self-loops ...
if isite1 == isite2:
remove_deltas = []
alldeltas = list(connections_site1_site2.keys())
alldeltas2 = list(connections_site1_site2.keys())
if (0, 0, 0) in alldeltas:
alldeltas.remove((0, 0, 0))
alldeltas2.remove((0, 0, 0))
for current_delta in alldeltas:
opp_current_delta = tuple(-dd for dd in current_delta)
if opp_current_delta in alldeltas2:
remove_deltas.append(current_delta)
alldeltas2.remove(current_delta)
alldeltas2.remove(opp_current_delta)
for remove_delta in remove_deltas:
connections_site1_site2.pop(remove_delta)
# Add all the edges
for conn, ligands in list(connections_site1_site2.items()):
self._environment_subgraph.add_edge(
node1,
node2,
start=node1.isite,
end=node2.isite,
delta=conn,
ligands=ligands,
)
self.environment_subgraphs[envs_string] = self._environment_subgraph
def setup_connectivity_description(self):
"""
Returns:
"""
pass
def get_connected_components(self, environments_symbols=None, only_atoms=None):
"""
Args:
environments_symbols ():
only_atoms ():
Returns:
"""
connected_components = []
env_subgraph = self.environment_subgraph(environments_symbols=environments_symbols, only_atoms=only_atoms)
for component_nodes in nx.connected_components(env_subgraph):
graph = env_subgraph.subgraph(component_nodes).copy()
connected_components.append(ConnectedComponent.from_graph(graph))
return connected_components
def setup_atom_environment_subgraph(self, atom_environment):
"""
Args:
atom_environment ():
Returns:
"""
raise NotImplementedError()
def setup_environments_subgraph(self, environments_symbols):
"""
Args:
environments_symbols ():
Returns:
"""
raise NotImplementedError()
def setup_atom_environments_subgraph(self, atoms_environments):
"""
Args:
atoms_environments ():
Returns:
"""
raise NotImplementedError()
def print_links(self):
"""
Returns:
"""
nodes = self.environment_subgraph().nodes()
print("Links in graph :")
for node in nodes:
print(node.isite, " is connected with : ")
for (n1, n2, data) in self.environment_subgraph().edges(node, data=True):
if n1.isite == data["start"]:
print(
" - {:d} by {:d} ligands ({:d} {:d} {:d})".format(
n2.isite,
len(data["ligands"]),
data["delta"][0],
data["delta"][1],
data["delta"][2],
)
)
else:
print(
" - {:d} by {:d} ligands ({:d} {:d} {:d})".format(
n2.isite,
len(data["ligands"]),
-data["delta"][0],
-data["delta"][1],
-data["delta"][2],
)
)
def as_dict(self):
"""
Returns:
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"light_structure_environments": self.light_structure_environments.as_dict(),
"connectivity_graph": jsanitize(nx.to_dict_of_dicts(self._graph)),
"environment_subgraphs": {
env_key: jsanitize(nx.to_dict_of_dicts(subgraph))
for env_key, subgraph in self.environment_subgraphs.items()
},
}
@classmethod
def from_dict(cls, d):
"""
Args:
d ():
Returns:
"""
# Reconstructs the graph with integer as nodes (json's as_dict replaces integer keys with str keys)
cgraph = nx.from_dict_of_dicts(d["connectivity_graph"], create_using=nx.MultiGraph, multigraph_input=True)
cgraph = nx.relabel_nodes(cgraph, int) # Just relabel the nodes using integer casting (maps str->int)
# Relabel multiedges (removes multiedges with str keys and adds them back with int keys)
edges = set(cgraph.edges())
for n1, n2 in edges:
new_edges = {int(iedge): edata for iedge, edata in cgraph[n1][n2].items()}
cgraph.remove_edges_from([(n1, n2, iedge) for iedge, edata in cgraph[n1][n2].items()])
cgraph.add_edges_from([(n1, n2, iedge, edata) for iedge, edata in new_edges.items()])
return cls(
LightStructureEnvironments.from_dict(d["light_structure_environments"]),
connectivity_graph=cgraph,
environment_subgraphs=None,
)
# TODO: also deserialize the environment_subgraphs
# environment_subgraphs={env_key: nx.from_dict_of_dicts(subgraph, multigraph_input=True)
# for env_key, subgraph in d['environment_subgraphs'].items()})
| {
"content_hash": "d9444261d29aca1383ca85153feed94b",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 115,
"avg_line_length": 41.25757575757576,
"alnum_prop": 0.5198310686742563,
"repo_name": "richardtran415/pymatgen",
"id": "190ef9fa191ea5fb67cba450e0717e20eda591c6",
"size": "16338",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/chemenv/connectivity/structure_connectivity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6783497"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
} |
"""
Tests for L{twisted.application.app} and L{twisted.scripts.twistd}.
"""
import signal, inspect, errno
import os, sys, cPickle, StringIO
try:
import pwd, grp
except ImportError:
pwd = grp = None
from zope.interface import implements
from twisted.trial import unittest
from twisted.application import service, app
from twisted.scripts import twistd
from twisted.python import log
from twisted.python.usage import UsageError
from twisted.python.log import ILogObserver
from twisted.python.versions import Version
from twisted.python.components import Componentized
from twisted.internet.defer import Deferred
from twisted.python.fakepwd import UserDatabase
try:
from twisted.python import syslog
except ImportError:
syslog = None
try:
from twisted.scripts import _twistd_unix
except ImportError:
_twistd_unix = None
else:
from twisted.scripts._twistd_unix import UnixApplicationRunner
from twisted.scripts._twistd_unix import UnixAppLogger
try:
import profile
except ImportError:
profile = None
try:
import hotshot
import hotshot.stats
except (ImportError, SystemExit):
# For some reasons, hotshot.stats seems to raise SystemExit on some
# distributions, probably when considered non-free. See the import of
# this module in twisted.application.app for more details.
hotshot = None
try:
import pstats
import cProfile
except ImportError:
cProfile = None
def patchUserDatabase(patch, user, uid, group, gid):
"""
Patch L{pwd.getpwnam} so that it behaves as though only one user exists
and patch L{grp.getgrnam} so that it behaves as though only one group
exists.
@param patch: A function like L{TestCase.patch} which will be used to
install the fake implementations.
@type user: C{str}
@param user: The name of the single user which will exist.
@type uid: C{int}
@param uid: The UID of the single user which will exist.
@type group: C{str}
@param group: The name of the single user which will exist.
@type gid: C{int}
@param gid: The GID of the single group which will exist.
"""
# Try not to be an unverified fake, but try not to depend on quirks of
# the system either (eg, run as a process with a uid and gid which
# equal each other, and so doesn't reliably test that uid is used where
# uid should be used and gid is used where gid should be used). -exarkun
pwent = pwd.getpwuid(os.getuid())
grent = grp.getgrgid(os.getgid())
database = UserDatabase()
database.addUser(
user, pwent.pw_passwd, uid, pwent.pw_gid,
pwent.pw_gecos, pwent.pw_dir, pwent.pw_shell)
def getgrnam(name):
result = list(grent)
result[result.index(grent.gr_name)] = group
result[result.index(grent.gr_gid)] = gid
result = tuple(result)
return {group: result}[name]
patch(pwd, "getpwnam", database.getpwnam)
patch(grp, "getgrnam", getgrnam)
class MockServiceMaker(object):
"""
A non-implementation of L{twisted.application.service.IServiceMaker}.
"""
tapname = 'ueoa'
def makeService(self, options):
"""
Take a L{usage.Options} instance and return a
L{service.IService} provider.
"""
self.options = options
self.service = service.Service()
return self.service
class CrippledAppLogger(app.AppLogger):
"""
@see: CrippledApplicationRunner.
"""
def start(self, application):
pass
class CrippledApplicationRunner(twistd._SomeApplicationRunner):
"""
An application runner that cripples the platform-specific runner and
nasty side-effect-having code so that we can use it without actually
running any environment-affecting code.
"""
loggerFactory = CrippledAppLogger
def preApplication(self):
pass
def postApplication(self):
pass
class ServerOptionsTest(unittest.TestCase):
"""
Non-platform-specific tests for the pltaform-specific ServerOptions class.
"""
def test_postOptionsSubCommandCausesNoSave(self):
"""
postOptions should set no_save to True when a subcommand is used.
"""
config = twistd.ServerOptions()
config.subCommand = 'ueoa'
config.postOptions()
self.assertEquals(config['no_save'], True)
def test_postOptionsNoSubCommandSavesAsUsual(self):
"""
If no sub command is used, postOptions should not touch no_save.
"""
config = twistd.ServerOptions()
config.postOptions()
self.assertEquals(config['no_save'], False)
def test_reportProfileDeprecation(self):
"""
Check that the --report-profile option prints a C{DeprecationWarning}.
"""
config = twistd.ServerOptions()
self.assertWarns(
DeprecationWarning, "--report-profile option is deprecated and "
"a no-op since Twisted 8.0.", app.__file__,
config.parseOptions, ["--report-profile", "foo"])
def test_listAllProfilers(self):
"""
All the profilers that can be used in L{app.AppProfiler} are listed in
the help output.
"""
config = twistd.ServerOptions()
helpOutput = str(config)
for profiler in app.AppProfiler.profilers:
self.assertIn(profiler, helpOutput)
def test_defaultUmask(self):
"""
The default value for the C{umask} option is C{None}.
"""
config = twistd.ServerOptions()
self.assertEqual(config['umask'], None)
def test_umask(self):
"""
The value given for the C{umask} option is parsed as an octal integer
literal.
"""
config = twistd.ServerOptions()
config.parseOptions(['--umask', '123'])
self.assertEqual(config['umask'], 83)
config.parseOptions(['--umask', '0123'])
self.assertEqual(config['umask'], 83)
def test_invalidUmask(self):
"""
If a value is given for the C{umask} option which cannot be parsed as
an integer, L{UsageError} is raised by L{ServerOptions.parseOptions}.
"""
config = twistd.ServerOptions()
self.assertRaises(UsageError, config.parseOptions, ['--umask', 'abcdef'])
if _twistd_unix is None:
msg = "twistd unix not available"
test_defaultUmask.skip = test_umask.skip = test_invalidUmask.skip = msg
class TapFileTest(unittest.TestCase):
"""
Test twistd-related functionality that requires a tap file on disk.
"""
def setUp(self):
"""
Create a trivial Application and put it in a tap file on disk.
"""
self.tapfile = self.mktemp()
f = file(self.tapfile, 'wb')
cPickle.dump(service.Application("Hi!"), f)
f.close()
def test_createOrGetApplicationWithTapFile(self):
"""
Ensure that the createOrGetApplication call that 'twistd -f foo.tap'
makes will load the Application out of foo.tap.
"""
config = twistd.ServerOptions()
config.parseOptions(['-f', self.tapfile])
application = CrippledApplicationRunner(config).createOrGetApplication()
self.assertEquals(service.IService(application).name, 'Hi!')
class TestLoggerFactory(object):
"""
A logger factory for L{TestApplicationRunner}.
"""
def __init__(self, runner):
self.runner = runner
def start(self, application):
"""
Save the logging start on the C{runner} instance.
"""
self.runner.order.append("log")
self.runner.hadApplicationLogObserver = hasattr(self.runner,
'application')
def stop(self):
"""
Don't log anything.
"""
class TestApplicationRunner(app.ApplicationRunner):
"""
An ApplicationRunner which tracks the environment in which its methods are
called.
"""
def __init__(self, options):
app.ApplicationRunner.__init__(self, options)
self.order = []
self.logger = TestLoggerFactory(self)
def preApplication(self):
self.order.append("pre")
self.hadApplicationPreApplication = hasattr(self, 'application')
def postApplication(self):
self.order.append("post")
self.hadApplicationPostApplication = hasattr(self, 'application')
class ApplicationRunnerTest(unittest.TestCase):
"""
Non-platform-specific tests for the platform-specific ApplicationRunner.
"""
def setUp(self):
config = twistd.ServerOptions()
self.serviceMaker = MockServiceMaker()
# Set up a config object like it's been parsed with a subcommand
config.loadedPlugins = {'test_command': self.serviceMaker}
config.subOptions = object()
config.subCommand = 'test_command'
self.config = config
def test_applicationRunnerGetsCorrectApplication(self):
"""
Ensure that a twistd plugin gets used in appropriate ways: it
is passed its Options instance, and the service it returns is
added to the application.
"""
arunner = CrippledApplicationRunner(self.config)
arunner.run()
self.assertIdentical(
self.serviceMaker.options, self.config.subOptions,
"ServiceMaker.makeService needs to be passed the correct "
"sub Command object.")
self.assertIdentical(
self.serviceMaker.service,
service.IService(arunner.application).services[0],
"ServiceMaker.makeService's result needs to be set as a child "
"of the Application.")
def test_preAndPostApplication(self):
"""
Test thet preApplication and postApplication methods are
called by ApplicationRunner.run() when appropriate.
"""
s = TestApplicationRunner(self.config)
s.run()
self.assertFalse(s.hadApplicationPreApplication)
self.assertTrue(s.hadApplicationPostApplication)
self.assertTrue(s.hadApplicationLogObserver)
self.assertEquals(s.order, ["pre", "log", "post"])
def _applicationStartsWithConfiguredID(self, argv, uid, gid):
"""
Assert that given a particular command line, an application is started
as a particular UID/GID.
@param argv: A list of strings giving the options to parse.
@param uid: An integer giving the expected UID.
@param gid: An integer giving the expected GID.
"""
self.config.parseOptions(argv)
events = []
class FakeUnixApplicationRunner(twistd._SomeApplicationRunner):
def setupEnvironment(self, chroot, rundir, nodaemon, umask,
pidfile):
events.append('environment')
def shedPrivileges(self, euid, uid, gid):
events.append(('privileges', euid, uid, gid))
def startReactor(self, reactor, oldstdout, oldstderr):
events.append('reactor')
def removePID(self, pidfile):
pass
class FakeService(object):
implements(service.IService, service.IProcess)
processName = None
def privilegedStartService(self):
events.append('privilegedStartService')
def startService(self):
events.append('startService')
def stopService(self):
pass
runner = FakeUnixApplicationRunner(self.config)
runner.preApplication()
runner.application = FakeService()
runner.postApplication()
self.assertEqual(
events,
['environment', 'privilegedStartService',
('privileges', False, uid, gid), 'startService', 'reactor'])
def test_applicationStartsWithConfiguredNumericIDs(self):
"""
L{postApplication} should change the UID and GID to the values
specified as numeric strings by the configuration after running
L{service.IService.privilegedStartService} and before running
L{service.IService.startService}.
"""
uid = 1234
gid = 4321
self._applicationStartsWithConfiguredID(
["--uid", str(uid), "--gid", str(gid)], uid, gid)
def test_applicationStartsWithConfiguredNameIDs(self):
"""
L{postApplication} should change the UID and GID to the values
specified as user and group names by the configuration after running
L{service.IService.privilegedStartService} and before running
L{service.IService.startService}.
"""
user = "foo"
uid = 1234
group = "bar"
gid = 4321
patchUserDatabase(self.patch, user, uid, group, gid)
self._applicationStartsWithConfiguredID(
["--uid", user, "--gid", group], uid, gid)
if getattr(os, 'setuid', None) is None:
msg = "Platform does not support --uid/--gid twistd options."
test_applicationStartsWithConfiguredNameIDs.skip = msg
test_applicationStartsWithConfiguredNumericIDs.skip = msg
del msg
def test_startReactorRunsTheReactor(self):
"""
L{startReactor} calls L{reactor.run}.
"""
reactor = DummyReactor()
runner = app.ApplicationRunner({
"profile": False,
"profiler": "profile",
"debug": False})
runner.startReactor(reactor, None, None)
self.assertTrue(
reactor.called, "startReactor did not call reactor.run()")
class UnixApplicationRunnerSetupEnvironmentTests(unittest.TestCase):
"""
Tests for L{UnixApplicationRunner.setupEnvironment}.
@ivar root: The root of the filesystem, or C{unset} if none has been
specified with a call to L{os.chroot} (patched for this TestCase with
L{UnixApplicationRunnerSetupEnvironmentTests.chroot ).
@ivar cwd: The current working directory of the process, or C{unset} if
none has been specified with a call to L{os.chdir} (patched for this
TestCase with L{UnixApplicationRunnerSetupEnvironmentTests.chdir).
@ivar mask: The current file creation mask of the process, or C{unset} if
none has been specified with a call to L{os.umask} (patched for this
TestCase with L{UnixApplicationRunnerSetupEnvironmentTests.umask).
@ivar daemon: A boolean indicating whether daemonization has been performed
by a call to L{_twistd_unix.daemonize} (patched for this TestCase with
L{UnixApplicationRunnerSetupEnvironmentTests.
"""
if _twistd_unix is None:
skip = "twistd unix not available"
unset = object()
def setUp(self):
self.root = self.unset
self.cwd = self.unset
self.mask = self.unset
self.daemon = False
self.pid = os.getpid()
self.patch(os, 'chroot', lambda path: setattr(self, 'root', path))
self.patch(os, 'chdir', lambda path: setattr(self, 'cwd', path))
self.patch(os, 'umask', lambda mask: setattr(self, 'mask', mask))
self.patch(_twistd_unix, "daemonize", self.daemonize)
self.runner = UnixApplicationRunner({})
def daemonize(self):
"""
Indicate that daemonization has happened and change the PID so that the
value written to the pidfile can be tested in the daemonization case.
"""
self.daemon = True
self.patch(os, 'getpid', lambda: self.pid + 1)
def test_chroot(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the root of the
filesystem if passed a non-C{None} value for the C{chroot} parameter.
"""
self.runner.setupEnvironment("/foo/bar", ".", True, None, None)
self.assertEqual(self.root, "/foo/bar")
def test_noChroot(self):
"""
L{UnixApplicationRunner.setupEnvironment} does not change the root of
the filesystem if passed C{None} for the C{chroot} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertIdentical(self.root, self.unset)
def test_changeWorkingDirectory(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the working directory
of the process to the path given for the C{rundir} parameter.
"""
self.runner.setupEnvironment(None, "/foo/bar", True, None, None)
self.assertEqual(self.cwd, "/foo/bar")
def test_daemonize(self):
"""
L{UnixApplicationRunner.setupEnvironment} daemonizes the process if
C{False} is passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", False, None, None)
self.assertTrue(self.daemon)
def test_noDaemonize(self):
"""
L{UnixApplicationRunner.setupEnvironment} does not daemonize the
process if C{True} is passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertFalse(self.daemon)
def test_nonDaemonPIDFile(self):
"""
L{UnixApplicationRunner.setupEnvironment} writes the process's PID to
the file specified by the C{pidfile} parameter.
"""
pidfile = self.mktemp()
self.runner.setupEnvironment(None, ".", True, None, pidfile)
fObj = file(pidfile)
pid = int(fObj.read())
fObj.close()
self.assertEqual(pid, self.pid)
def test_daemonPIDFile(self):
"""
L{UnixApplicationRunner.setupEnvironment} writes the daemonized
process's PID to the file specified by the C{pidfile} parameter if
C{nodaemon} is C{False}.
"""
pidfile = self.mktemp()
self.runner.setupEnvironment(None, ".", False, None, pidfile)
fObj = file(pidfile)
pid = int(fObj.read())
fObj.close()
self.assertEqual(pid, self.pid + 1)
def test_umask(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the process umask to
the value specified by the C{umask} parameter.
"""
self.runner.setupEnvironment(None, ".", False, 123, None)
self.assertEqual(self.mask, 123)
def test_noDaemonizeNoUmask(self):
"""
L{UnixApplicationRunner.setupEnvironment} doesn't change the process
umask if C{None} is passed for the C{umask} parameter and C{True} is
passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertIdentical(self.mask, self.unset)
def test_daemonizedNoUmask(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the process umask to
C{0077} if C{None} is passed for the C{umask} parameter and C{False} is
passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", False, None, None)
self.assertEqual(self.mask, 0077)
class UnixApplicationRunnerStartApplicationTests(unittest.TestCase):
"""
Tests for L{UnixApplicationRunner.startApplication}.
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def test_setupEnvironment(self):
"""
L{UnixApplicationRunner.startApplication} calls
L{UnixApplicationRunner.setupEnvironment} with the chroot, rundir,
nodaemon, umask, and pidfile parameters from the configuration it is
constructed with.
"""
options = twistd.ServerOptions()
options.parseOptions([
'--nodaemon',
'--umask', '0070',
'--chroot', '/foo/chroot',
'--rundir', '/foo/rundir',
'--pidfile', '/foo/pidfile'])
application = service.Application("test_setupEnvironment")
self.runner = UnixApplicationRunner(options)
args = []
def fakeSetupEnvironment(self, chroot, rundir, nodaemon, umask, pidfile):
args.extend((chroot, rundir, nodaemon, umask, pidfile))
# Sanity check
self.assertEqual(
inspect.getargspec(self.runner.setupEnvironment),
inspect.getargspec(fakeSetupEnvironment))
self.patch(UnixApplicationRunner, 'setupEnvironment', fakeSetupEnvironment)
self.patch(UnixApplicationRunner, 'shedPrivileges', lambda *a, **kw: None)
self.patch(app, 'startApplication', lambda *a, **kw: None)
self.runner.startApplication(application)
self.assertEqual(
args,
['/foo/chroot', '/foo/rundir', True, 56, '/foo/pidfile'])
class UnixApplicationRunnerRemovePID(unittest.TestCase):
"""
Tests for L{UnixApplicationRunner.removePID}.
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def test_removePID(self):
"""
L{UnixApplicationRunner.removePID} deletes the file the name of
which is passed to it.
"""
runner = UnixApplicationRunner({})
path = self.mktemp()
os.makedirs(path)
pidfile = os.path.join(path, "foo.pid")
file(pidfile, "w").close()
runner.removePID(pidfile)
self.assertFalse(os.path.exists(pidfile))
def test_removePIDErrors(self):
"""
Calling L{UnixApplicationRunner.removePID} with a non-existent filename logs
an OSError.
"""
runner = UnixApplicationRunner({})
runner.removePID("fakepid")
errors = self.flushLoggedErrors(OSError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.errno, errno.ENOENT)
class DummyReactor(object):
"""
A dummy reactor, only providing a C{run} method and checking that it
has been called.
@ivar called: if C{run} has been called or not.
@type called: C{bool}
"""
called = False
def run(self):
"""
A fake run method, checking that it's been called one and only time.
"""
if self.called:
raise RuntimeError("Already called")
self.called = True
class AppProfilingTestCase(unittest.TestCase):
"""
Tests for L{app.AppProfiler}.
"""
def test_profile(self):
"""
L{app.ProfileRunner.run} should call the C{run} method of the reactor
and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
data = file(config["profile"]).read()
self.assertIn("DummyReactor.run", data)
self.assertIn("function calls", data)
if profile is None:
test_profile.skip = "profile module not available"
def _testStats(self, statsClass, profile):
out = StringIO.StringIO()
# Patch before creating the pstats, because pstats binds self.stream to
# sys.stdout early in 2.5 and newer.
stdout = self.patch(sys, 'stdout', out)
# If pstats.Stats can load the data and then reformat it, then the
# right thing probably happened.
stats = statsClass(profile)
stats.print_stats()
stdout.restore()
data = out.getvalue()
self.assertIn("function calls", data)
self.assertIn("(run)", data)
def test_profileSaveStats(self):
"""
With the C{savestats} option specified, L{app.ProfileRunner.run}
should save the raw stats object instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(pstats.Stats, config['profile'])
if profile is None:
test_profileSaveStats.skip = "profile module not available"
def test_withoutProfile(self):
"""
When the C{profile} module is not present, L{app.ProfilerRunner.run}
should raise a C{SystemExit} exception.
"""
savedModules = sys.modules.copy()
config = twistd.ServerOptions()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
sys.modules["profile"] = None
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
def test_profilePrintStatsError(self):
"""
When an error happens during the print of the stats, C{sys.stdout}
should be restored to its initial value.
"""
class ErroneousProfile(profile.Profile):
def print_stats(self):
raise RuntimeError("Boom")
self.patch(profile, "Profile", ErroneousProfile)
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
oldStdout = sys.stdout
self.assertRaises(RuntimeError, profiler.run, reactor)
self.assertIdentical(sys.stdout, oldStdout)
if profile is None:
test_profilePrintStatsError.skip = "profile module not available"
def test_hotshot(self):
"""
L{app.HotshotRunner.run} should call the C{run} method of the reactor
and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "hotshot"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
data = file(config["profile"]).read()
self.assertIn("run", data)
self.assertIn("function calls", data)
if hotshot is None:
test_hotshot.skip = "hotshot module not available"
def test_hotshotSaveStats(self):
"""
With the C{savestats} option specified, L{app.HotshotRunner.run} should
save the raw stats object instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "hotshot"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(hotshot.stats.load, config['profile'])
if hotshot is None:
test_hotshotSaveStats.skip = "hotshot module not available"
def test_withoutHotshot(self):
"""
When the C{hotshot} module is not present, L{app.HotshotRunner.run}
should raise a C{SystemExit} exception and log the C{ImportError}.
"""
savedModules = sys.modules.copy()
sys.modules["hotshot"] = None
config = twistd.ServerOptions()
config["profiler"] = "hotshot"
profiler = app.AppProfiler(config)
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
def test_hotshotPrintStatsError(self):
"""
When an error happens while printing the stats, C{sys.stdout}
should be restored to its initial value.
"""
class ErroneousStats(pstats.Stats):
def print_stats(self):
raise RuntimeError("Boom")
self.patch(pstats, "Stats", ErroneousStats)
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "hotshot"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
oldStdout = sys.stdout
self.assertRaises(RuntimeError, profiler.run, reactor)
self.assertIdentical(sys.stdout, oldStdout)
if hotshot is None:
test_hotshotPrintStatsError.skip = "hotshot module not available"
def test_cProfile(self):
"""
L{app.CProfileRunner.run} should call the C{run} method of the
reactor and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "cProfile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
data = file(config["profile"]).read()
self.assertIn("run", data)
self.assertIn("function calls", data)
if cProfile is None:
test_cProfile.skip = "cProfile module not available"
def test_cProfileSaveStats(self):
"""
With the C{savestats} option specified,
L{app.CProfileRunner.run} should save the raw stats object
instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "cProfile"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(pstats.Stats, config['profile'])
if cProfile is None:
test_cProfileSaveStats.skip = "cProfile module not available"
def test_withoutCProfile(self):
"""
When the C{cProfile} module is not present,
L{app.CProfileRunner.run} should raise a C{SystemExit}
exception and log the C{ImportError}.
"""
savedModules = sys.modules.copy()
sys.modules["cProfile"] = None
config = twistd.ServerOptions()
config["profiler"] = "cProfile"
profiler = app.AppProfiler(config)
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
def test_unknownProfiler(self):
"""
Check that L{app.AppProfiler} raises L{SystemExit} when given an
unknown profiler name.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "foobar"
error = self.assertRaises(SystemExit, app.AppProfiler, config)
self.assertEquals(str(error), "Unsupported profiler name: foobar")
def test_defaultProfiler(self):
"""
L{app.Profiler} defaults to the hotshot profiler if not specified.
"""
profiler = app.AppProfiler({})
self.assertEquals(profiler.profiler, "hotshot")
def test_profilerNameCaseInsentive(self):
"""
The case of the profiler name passed to L{app.AppProfiler} is not
relevant.
"""
profiler = app.AppProfiler({"profiler": "HotShot"})
self.assertEquals(profiler.profiler, "hotshot")
def _patchFileLogObserver(patch):
"""
Patch L{log.FileLogObserver} to record every call and keep a reference to
the passed log file for tests.
@param patch: a callback for patching (usually L{unittest.TestCase.patch}).
@return: the list that keeps track of the log files.
@rtype: C{list}
"""
logFiles = []
oldFileLobObserver = log.FileLogObserver
def FileLogObserver(logFile):
logFiles.append(logFile)
return oldFileLobObserver(logFile)
patch(log, 'FileLogObserver', FileLogObserver)
return logFiles
class AppLoggerTestCase(unittest.TestCase):
"""
Tests for L{app.AppLogger}.
@ivar observers: list of observers installed during the tests.
@type observers: C{list}
"""
def setUp(self):
"""
Override L{log.addObserver} so that we can trace the observers
installed in C{self.observers}.
"""
self.observers = []
def startLoggingWithObserver(observer):
self.observers.append(observer)
log.addObserver(observer)
self.patch(log, 'startLoggingWithObserver', startLoggingWithObserver)
def tearDown(self):
"""
Remove all installed observers.
"""
for observer in self.observers:
log.removeObserver(observer)
def _checkObserver(self, logs):
"""
Ensure that initial C{twistd} logs are written to the given list.
@type logs: C{list}
@param logs: The list whose C{append} method was specified as the
initial log observer.
"""
self.assertEquals(self.observers, [logs.append])
self.assertIn("starting up", logs[0]["message"][0])
self.assertIn("reactor class", logs[1]["message"][0])
def test_start(self):
"""
L{app.AppLogger.start} calls L{log.addObserver}, and then writes some
messages about twistd and the reactor.
"""
logger = app.AppLogger({})
observer = []
logger._getLogObserver = lambda: observer.append
logger.start(Componentized())
self._checkObserver(observer)
def test_startUsesApplicationLogObserver(self):
"""
When the L{ILogObserver} component is available on the application,
that object will be used as the log observer instead of constructing a
new one.
"""
application = Componentized()
logs = []
application.setComponent(ILogObserver, logs.append)
logger = app.AppLogger({})
logger.start(application)
self._checkObserver(logs)
def test_getLogObserverStdout(self):
"""
When logfile is empty or set to C{-}, L{app.AppLogger._getLogObserver}
returns a log observer pointing at C{sys.stdout}.
"""
logger = app.AppLogger({"logfile": "-"})
logFiles = _patchFileLogObserver(self.patch)
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertIdentical(logFiles[0], sys.stdout)
logger = app.AppLogger({"logfile": ""})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 2)
self.assertIdentical(logFiles[1], sys.stdout)
def test_getLogObserverFile(self):
"""
When passing the C{logfile} option, L{app.AppLogger._getLogObserver}
returns a log observer pointing at the specified path.
"""
logFiles = _patchFileLogObserver(self.patch)
filename = self.mktemp()
logger = app.AppLogger({"logfile": filename})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertEquals(logFiles[0].path,
os.path.abspath(filename))
def test_stop(self):
"""
L{app.AppLogger.stop} removes the observer created in C{start}, and
reinitialize its C{_observer} so that if C{stop} is called several
times it doesn't break.
"""
removed = []
observer = object()
def remove(observer):
removed.append(observer)
self.patch(log, 'removeObserver', remove)
logger = app.AppLogger({})
logger._observer = observer
logger.stop()
self.assertEquals(removed, [observer])
logger.stop()
self.assertEquals(removed, [observer])
self.assertIdentical(logger._observer, None)
class UnixAppLoggerTestCase(unittest.TestCase):
"""
Tests for L{UnixAppLogger}.
@ivar signals: list of signal handlers installed.
@type signals: C{list}
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def setUp(self):
"""
Fake C{signal.signal} for not installing the handlers but saving them
in C{self.signals}.
"""
self.signals = []
def fakeSignal(sig, f):
self.signals.append((sig, f))
self.patch(signal, "signal", fakeSignal)
def test_getLogObserverStdout(self):
"""
When non-daemonized and C{logfile} is empty or set to C{-},
L{UnixAppLogger._getLogObserver} returns a log observer pointing at
C{sys.stdout}.
"""
logFiles = _patchFileLogObserver(self.patch)
logger = UnixAppLogger({"logfile": "-", "nodaemon": True})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertIdentical(logFiles[0], sys.stdout)
logger = UnixAppLogger({"logfile": "", "nodaemon": True})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 2)
self.assertIdentical(logFiles[1], sys.stdout)
def test_getLogObserverStdoutDaemon(self):
"""
When daemonized and C{logfile} is set to C{-},
L{UnixAppLogger._getLogObserver} raises C{SystemExit}.
"""
logger = UnixAppLogger({"logfile": "-", "nodaemon": False})
error = self.assertRaises(SystemExit, logger._getLogObserver)
self.assertEquals(str(error), "Daemons cannot log to stdout, exiting!")
def test_getLogObserverFile(self):
"""
When C{logfile} contains a file name, L{app.AppLogger._getLogObserver}
returns a log observer pointing at the specified path, and a signal
handler rotating the log is installed.
"""
logFiles = _patchFileLogObserver(self.patch)
filename = self.mktemp()
logger = UnixAppLogger({"logfile": filename})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertEquals(logFiles[0].path,
os.path.abspath(filename))
self.assertEquals(len(self.signals), 1)
self.assertEquals(self.signals[0][0], signal.SIGUSR1)
d = Deferred()
def rotate():
d.callback(None)
logFiles[0].rotate = rotate
rotateLog = self.signals[0][1]
rotateLog(None, None)
return d
def test_getLogObserverDontOverrideSignalHandler(self):
"""
If a signal handler is already installed,
L{UnixAppLogger._getLogObserver} doesn't override it.
"""
def fakeGetSignal(sig):
self.assertEquals(sig, signal.SIGUSR1)
return object()
self.patch(signal, "getsignal", fakeGetSignal)
filename = self.mktemp()
logger = UnixAppLogger({"logfile": filename})
observer = logger._getLogObserver()
self.assertEquals(self.signals, [])
def test_getLogObserverDefaultFile(self):
"""
When daemonized and C{logfile} is empty, the observer returned by
L{UnixAppLogger._getLogObserver} points at C{twistd.log} in the current
directory.
"""
logFiles = _patchFileLogObserver(self.patch)
logger = UnixAppLogger({"logfile": "", "nodaemon": False})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertEquals(logFiles[0].path,
os.path.abspath("twistd.log"))
def test_getLogObserverSyslog(self):
"""
If C{syslog} is set to C{True}, L{UnixAppLogger._getLogObserver} starts
a L{syslog.SyslogObserver} with given C{prefix}.
"""
class fakesyslogobserver(object):
def __init__(self, prefix):
fakesyslogobserver.prefix = prefix
def emit(self, eventDict):
pass
self.patch(syslog, "SyslogObserver", fakesyslogobserver)
logger = UnixAppLogger({"syslog": True, "prefix": "test-prefix"})
observer = logger._getLogObserver()
self.assertEquals(fakesyslogobserver.prefix, "test-prefix")
if syslog is None:
test_getLogObserverSyslog.skip = "Syslog not available"
class DeprecationTests(unittest.TestCase):
"""
Tests for deprecated features.
"""
def test_initialLog(self):
"""
L{app.initialLog} is deprecated.
"""
logs = []
log.addObserver(logs.append)
self.addCleanup(log.removeObserver, logs.append)
self.callDeprecated(Version("Twisted", 8, 2, 0), app.initialLog)
self.assertEquals(len(logs), 2)
self.assertIn("starting up", logs[0]["message"][0])
self.assertIn("reactor class", logs[1]["message"][0])
| {
"content_hash": "8170257220b365472a8ad61b9abe136e",
"timestamp": "",
"source": "github",
"line_count": 1279,
"max_line_length": 84,
"avg_line_length": 31.773260359655982,
"alnum_prop": 0.625227619469462,
"repo_name": "GetSomeBlocks/ServerStatus",
"id": "eb00d1bcc57fdd71ad5934d0e8babf30b744bc29",
"size": "40721",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "resources/lib/twisted/twisted/test/test_twistd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "930"
},
{
"name": "C",
"bytes": "293000"
},
{
"name": "C#",
"bytes": "9664"
},
{
"name": "CSS",
"bytes": "24716"
},
{
"name": "D",
"bytes": "542"
},
{
"name": "HTML",
"bytes": "374176"
},
{
"name": "Java",
"bytes": "206"
},
{
"name": "Objective-C",
"bytes": "9421"
},
{
"name": "Python",
"bytes": "8744725"
},
{
"name": "Ruby",
"bytes": "6773"
},
{
"name": "Shell",
"bytes": "13600"
}
],
"symlink_target": ""
} |
# =================================================================
#
# Authors: Ricardo Garcia Silva <[email protected]>
#
# Copyright (c) 2017 Ricardo Garcia Silva
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
"""Unit tests for pycsw.core.util"""
import datetime as dt
import os
import time
from pathlib import Path
import mock
import pytest
from shapely.wkt import loads
from pycsw.core import util
pytestmark = pytest.mark.unit
def test_get_today_and_now():
fake_now = "2017-01-01T00:00:00Z"
with mock.patch.object(util.time, "localtime") as mock_localtime:
mock_localtime.return_value = time.strptime(
fake_now,
"%Y-%m-%dT%H:%M:%SZ"
)
result = util.get_today_and_now()
assert result == fake_now
@pytest.mark.parametrize("value, expected", [
(dt.date(2017, 1, 23), "2017-01-23"),
(dt.datetime(2017, 1, 23), "2017-01-23"),
(dt.datetime(2017, 1, 23, 20, 32, 10), "2017-01-23T20:32:10Z"),
(dt.datetime(2017, 1, 23, 10), "2017-01-23T10:00:00Z"),
(dt.datetime(2017, 1, 23, 10, 20), "2017-01-23T10:20:00Z"),
])
def test_datetime2iso8601(value, expected):
result = util.datetime2iso8601(value)
assert result == expected
@pytest.mark.parametrize("version, expected", [
("2", -1),
("1.2", -1),
("5.4.3.2", -1),
("This is a regular string, not a version", -1),
("3.4.1", 30401),
])
def test_get_version_integer(version, expected):
result = util.get_version_integer(version)
assert result == expected
@pytest.mark.parametrize("invalid_version", [
2,
2.2,
None,
])
def test_get_version_integer_invalid_version(invalid_version):
with pytest.raises(RuntimeError):
util.get_version_integer(invalid_version)
@pytest.mark.parametrize("xpath_expression, expected", [
("ns1:first", "{something}first"),
("ns1:first/ns2:second", "{something}first/{other}second"),
("ns1:first/ns2:second[1]", "{something}first/{other}second[1]"),
("ns1:first/*/ns3:third", "{something}first/*/{another}third"),
("", ""),
])
def test_nspath_eval(xpath_expression, expected):
nsmap = {
"ns1": "something",
"ns2": "other",
"ns3": "another",
}
result = util.nspath_eval(xpath_expression, nsmap)
assert result == expected
def test_nspath_eval_invalid_element():
with pytest.raises(RuntimeError):
util.nspath_eval(
xpath="ns1:tag1/ns2:ns3:tag2",
nsmap={
"ns1": "something",
"ns2": "other",
"ns3": "another",
}
)
@pytest.mark.parametrize("envelope, expected", [
("ENVELOPE (-180,180,90,-90)", "-180,-90,180,90"),
(" ENVELOPE(-180,180,90,-90)", "-180,-90,180,90"),
(" ENVELOPE( -180, 180, 90, -90) ", "-180,-90,180,90"),
])
def test_wktenvelope2bbox(envelope, expected):
result = util.wktenvelope2bbox(envelope)
assert result == expected
# TODO - Add more WKT cases for other geometry types
@pytest.mark.parametrize("wkt, bounds, expected", [
("POINT (10 10)", True, (10.0, 10.0, 10.0, 10.0)),
("SRID=4326;POINT (10 10)", True, (10.0, 10.0, 10.0, 10.0)),
("POINT (10 10)", False, loads("POINT (10 10)")),
("SRID=4326;POINT (10 10)", False, loads("POINT (10 10)")),
])
def test_wkt2geom(wkt, bounds, expected):
result = util.wkt2geom(wkt, bounds=bounds)
assert result == expected
@pytest.mark.parametrize("bbox, expected", [
(
"0.0, 10.0, 30.0, 15.0",
"POLYGON((0.00 10.00, 0.00 15.00, 30.00 15.00, "
"30.00 10.00, 0.00 10.00))"
),
(
"-10.0, 10.0, 30.0, 15.0",
"POLYGON((-10.00 10.00, -10.00 15.00, 30.00 15.00, "
"30.00 10.00, -10.00 10.00))"
),
])
def test_bbox2wktpolygon(bbox, expected):
result = util.bbox2wktpolygon(bbox)
assert result == expected
def test_transform_mappings():
queryables = {
"q1": {"xpath": "p1", "dbcol": "col1"},
"q2": {"xpath": "p2", "dbcol": "col2"},
}
typename = {"q2": "q1"}
duplicate_queryables = queryables.copy()
duplicate_typename = typename.copy()
util.transform_mappings(duplicate_queryables, duplicate_typename)
assert duplicate_queryables["q1"]["xpath"] == queryables["q2"]["xpath"]
assert duplicate_queryables["q1"]["dbcol"] == queryables["q2"]["dbcol"]
@pytest.mark.parametrize("name, value, expected", [
("name", "john", "john"),
("date", dt.date(2017, 1, 1), "2017-01-01"),
("datetime", dt.datetime(2017, 1, 1, 10, 5), "2017-01-01T10:05:00Z"),
("some_callable", os.getcwd, os.getcwd()),
])
def test_getqattr_no_link(name, value, expected):
class Phony(object):
pass
instance = Phony()
setattr(instance, name, value)
result = util.getqattr(instance, name)
assert result == expected
def test_getqattr_link():
some_object = mock.MagicMock()
some_object.some_link.return_value = [
["one", "two"],
["three", "four"],
]
result = util.getqattr(some_object, "some_link")
assert result == "one,two^three,four"
def test_getqattr_invalid():
result = util.getqattr(dt.date(2017, 1, 1), "name")
assert result is None
def test_http_request_post():
# here we replace owslib.util.http_post with a mock object
# because we are not interested in testing owslib
method = "POST"
url = "some_phony_url"
request = "some_phony_request"
timeout = 40
with mock.patch("pycsw.core.util.http_post",
autospec=True) as mock_http_post:
util.http_request(
method=method,
url=url,
request=request,
timeout=timeout
)
mock_http_post.assert_called_with(url, request, timeout=timeout)
@pytest.mark.parametrize("url, expected", [
("http://host/wms", "http://host/wms?"),
("http://host/wms?foo=bar&", "http://host/wms?foo=bar&"),
("http://host/wms?foo=bar", "http://host/wms?foo=bar&"),
("http://host/wms?", "http://host/wms?"),
("http://host/wms?foo", "http://host/wms?foo&"),
])
def test_bind_url(url, expected):
result = util.bind_url(url)
assert result == expected
@pytest.mark.parametrize("ip, netmask, expected", [
("192.168.100.14", "192.168.100.0/24", True),
("192.168.100.14", "192.168.0.0/24", False),
("192.168.100.14", "192.168.0.0/16", True),
])
def test_ip_in_network_cidr(ip, netmask, expected):
result = util.ip_in_network_cidr(ip, netmask)
assert result == expected
@pytest.mark.parametrize("ip, whitelist, expected", [
("192.168.100.14", [], False),
("192.168.100.14", ["192.168.100.15"], False),
("192.168.100.14", ["192.168.100.15", "192.168.100.14"], True),
("192.168.100.14", ["192.168.100.*"], True),
("192.168.100.14", ["192.168.100.15", "192.168.100.*"], True),
("192.168.100.14", ["192.168.100.0/24"], True),
("192.168.100.14", ["192.168.100.15", "192.168.100.0/24"], True),
("192.168.10.14", ["192.168.100.15", "192.168.0.0/16"], True),
])
def test_ipaddress_in_whitelist(ip, whitelist, expected):
result = util.ipaddress_in_whitelist(ip, whitelist)
assert result == expected
@pytest.mark.parametrize("linkstr, expected", [
# old style CSV
("roads,my roads,OGC:WMS,http://example.org/wms",
[{
"name": "roads",
"description": "my roads",
"protocol": "OGC:WMS",
"url": "http://example.org/wms"
}]
),
# old style CSV with some empty tokens
(",,,http://example.org/wms",
[{
"name": None,
"description": None,
"protocol": None,
"url": "http://example.org/wms"
}]
),
# old style CSV with empty tokens
(",,,",
[{
"name": None,
"description": None,
"protocol": None,
"url": None
}]
),
# old style CSV with 2 links
("roads,my roads,OGC:WMS,http://example.org/wms^roads,my roads,OGC:WFS,http://example.org/wfs",
[{
"name": "roads",
"description": "my roads",
"protocol": "OGC:WMS",
"url": "http://example.org/wms"
}, {
"name": "roads",
"description": "my roads",
"protocol": "OGC:WFS",
"url": "http://example.org/wfs"
}]
),
# JSON style
('[{"name": "roads", "description": "my roads", "protocol": "OGC:WMS", "url": "http://example.org/wms"}]',
[{
"name": "roads",
"description": "my roads",
"protocol": "OGC:WMS",
"url": "http://example.org/wms"
}]
),
# JSON style with some empty keys
('[{"name": "roads", "description": null, "protocol": "OGC:WMS", "url": "http://example.org/wms"}]',
[{
"name": "roads",
"description": None,
"protocol": "OGC:WMS",
"url": "http://example.org/wms"
}]
),
# JSON style with multiple links
('[{"name": "roads", "description": null, "protocol": "OGC:WMS", "url": "http://example.org/wms"},'
'{"name": "roads", "description": null, "protocol": "OGC:WFS", "url": "http://example.org/wfs"}]',
[{
"name": "roads",
"description": None,
"protocol": "OGC:WMS",
"url": "http://example.org/wms"
}, {
"name": "roads",
"description": None,
"protocol": "OGC:WFS",
"url": "http://example.org/wfs"
}]
)
])
def test_jsonify_links(linkstr, expected):
result = util.jsonify_links(linkstr)
assert isinstance(result, list)
assert result == expected
@pytest.mark.parametrize("value, result", [
("foo", False),
(None, True),
('', True),
(' ', True),
(' ', True),
])
def test_is_none_or_empty(value, result):
assert util.is_none_or_empty(value) is result
@pytest.mark.parametrize("import_path, expected_attribute", [
pytest.param("itertools", "count", id="import from stdlib"),
pytest.param("pycsw.core.admin", "setup_db", id="dotted path import from pycsw"),
pytest.param(__file__, "test_programmatic_import", id="filesystem path import"),
])
def test_programmatic_import(import_path, expected_attribute):
imported_module = util.programmatic_import(import_path)
assert getattr(imported_module, expected_attribute)
@pytest.mark.parametrize("invalid_import_path", [
"dummy",
"dummy.submodule",
"/non-existent/path",
str(Path(__file__).parent / "invalid_path"),
])
def test_programmatic_import_with_invalid_path(invalid_import_path):
result = util.programmatic_import(invalid_import_path)
assert result is None
| {
"content_hash": "099b84b72a21c7bc2915aefe804b028b",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 110,
"avg_line_length": 31.918478260869566,
"alnum_prop": 0.5893069981270219,
"repo_name": "geopython/pycsw",
"id": "49329027d2a03daeca6b708bd93c9a0450625783",
"size": "11746",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unittests/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2909"
},
{
"name": "HTML",
"bytes": "25468"
},
{
"name": "Makefile",
"bytes": "677"
},
{
"name": "Python",
"bytes": "881652"
},
{
"name": "Shell",
"bytes": "129"
},
{
"name": "XSLT",
"bytes": "357"
}
],
"symlink_target": ""
} |
import os
WERCKER_FOLDER_NAME = '.wercker'
WERCKER_CREDENTIALS_FILE = 'credentials'
def get_global_wercker_path():
return os.path.join(os.environ['HOME'], WERCKER_FOLDER_NAME)
def get_global_wercker_filename():
return os.path.join(get_global_wercker_path(), WERCKER_CREDENTIALS_FILE)
def check_or_create_path(path):
if not os.path.isdir(path):
os.makedirs(path)
return True
def find_folder_containing_folder_name(path, folder_name):
"""Find the nearest parent with a <folder_name> folder"""
if path == os.path.curdir:
path = os.path.realpath(path)
if os.path.isdir(path):
if os.path.isdir(os.path.join(path, folder_name)):
return path
else:
parent = os.path.realpath(os.path.join(path, os.path.pardir))
if parent == path:
return None
return find_folder_containing_folder_name(parent, folder_name)
def find_git_root(path, folder_name=".git"):
"""
Find the nearest parent with <folder_name> folder, handy for
locating the ".git" root folder
"""
return find_folder_containing_folder_name(path, folder_name)
| {
"content_hash": "d1e55374ec695937522b3d3bcf0e3cd4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 27.209302325581394,
"alnum_prop": 0.6461538461538462,
"repo_name": "wercker/wercker-cli",
"id": "50ce6f6c4943559d2475c180a3c05d6720288d95",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "werckercli/paths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149113"
},
{
"name": "Shell",
"bytes": "67349"
}
],
"symlink_target": ""
} |
from mock import patch, Mock, call
from ...testcases import DustyTestCase
from dusty.commands.status import _has_active_container, get_dusty_status
from dusty.schemas.base_schema_class import DustySchema
from ..utils import get_app_dusty_schema, get_bundle_dusty_schema, get_lib_dusty_schema
class TestStatusCommands(DustyTestCase):
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_lib_active(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = ['some_container']
self.assertEquals(False, _has_active_container('lib', 'lib-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_lib_inactive(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = []
self.assertEquals(False, _has_active_container('lib', 'lib-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_app_active(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = ['some_container']
self.assertEquals(True, _has_active_container('app', 'app-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_app_inactive(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = []
self.assertEquals(False, _has_active_container('app', 'app-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_service_active(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = ['some_container']
self.assertEquals(True, _has_active_container('service', 'service-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_service_inactive(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = []
self.assertEquals(False, _has_active_container('service', 'service-a'))
@patch('dusty.commands.status.docker_vm_is_running')
@patch('dusty.systems.docker.get_docker_client')
@patch('dusty.commands.status.PrettyTable')
@patch('dusty.commands.status.get_dusty_containers')
@patch('dusty.schemas.base_schema_class.get_specs_from_path')
@patch('dusty.compiler.spec_assembler._get_referenced_apps')
@patch('dusty.compiler.spec_assembler._get_referenced_libs')
@patch('dusty.compiler.spec_assembler._get_referenced_services')
def test_get_dusty_status_active_1(self, fake_get_services, fake_get_libs, fake_get_apps, fake_get_specs,
fake_get_dusty_containers, fake_pretty_table, fake_get_docker_client, fake_vm_is_running):
fake_get_services.return_value = set(['ser1', 'ser2', 'ser3'])
fake_get_libs.return_value = set(['lib1'])
fake_get_apps.return_value = set(['app1', 'app2'])
fake_table = Mock()
fake_pretty_table.return_value = fake_table
fake_get_dusty_containers.return_value = ['some_container']
fake_get_specs.return_value = {'apps': {'app1': get_app_dusty_schema({}, 'app1'), 'app2':get_app_dusty_schema({}, 'app2')},
'libs': {'lib1': get_lib_dusty_schema({}, 'lib1')},
'services': {'ser1': DustySchema(None, {}, 'ser1', 'services'), 'ser2': DustySchema(None, {}, 'ser2', 'services'), 'ser3': DustySchema(None, {}, 'ser3', 'services')},
'bundles': get_lib_dusty_schema({}, 'bundle')}
fake_get_docker_client.return_value = None
fake_vm_is_running.return_value = True
get_dusty_status()
call_args_list = fake_table.add_row.call_args_list
self.assertTrue(call(['app1', 'app', 'X']) in call_args_list)
self.assertTrue(call(['app2', 'app', 'X']) in call_args_list)
self.assertTrue(call(['lib1', 'lib', '']) in call_args_list)
self.assertTrue(call(['ser1', 'service', 'X']) in call_args_list)
self.assertTrue(call(['ser2', 'service', 'X']) in call_args_list)
self.assertTrue(call(['ser3', 'service', 'X']) in call_args_list)
self.assertTrue(call(['dustyInternalNginx', '', 'X']) in call_args_list)
self.assertEquals(len(call_args_list), 7)
@patch('dusty.commands.status.docker_vm_is_running')
@patch('dusty.systems.docker.get_docker_client')
@patch('dusty.commands.status.PrettyTable')
@patch('dusty.commands.status.get_dusty_containers')
@patch('dusty.schemas.base_schema_class.get_specs_from_path')
@patch('dusty.compiler.spec_assembler._get_referenced_apps')
@patch('dusty.compiler.spec_assembler._get_referenced_libs')
@patch('dusty.compiler.spec_assembler._get_referenced_services')
def test_get_dusty_status_active_2(self, fake_get_services, fake_get_libs, fake_get_apps, fake_get_specs,
fake_get_dusty_containers, fake_pretty_table, fake_get_docker_client, fake_vm_is_running):
fake_get_services.return_value = set(['ser1', 'ser2', 'ser3'])
fake_get_libs.return_value = set(['lib1'])
fake_get_apps.return_value = set(['app1', 'app2'])
fake_table = Mock()
fake_pretty_table.return_value = fake_table
fake_get_dusty_containers.return_value = []
fake_get_specs.return_value = {'apps': {'app1': get_app_dusty_schema({}, 'app1'), 'app2':get_app_dusty_schema({}, 'app2')},
'libs': {'lib1': get_lib_dusty_schema({}, 'lib1')},
'services': {'ser1': DustySchema(None, {}, 'ser1', 'services'), 'ser2': DustySchema(None, {}, 'ser2', 'services'), 'ser3': DustySchema(None, {}, 'ser3', 'services')},
'bundles': get_lib_dusty_schema({}, 'bundle')}
fake_get_docker_client.return_value = None
fake_vm_is_running.return_value = True
get_dusty_status()
call_args_list = fake_table.add_row.call_args_list
self.assertTrue(call(['app1', 'app', '']) in call_args_list)
self.assertTrue(call(['app2', 'app', '']) in call_args_list)
self.assertTrue(call(['lib1', 'lib', '']) in call_args_list)
self.assertTrue(call(['ser1', 'service', '']) in call_args_list)
self.assertTrue(call(['ser2', 'service', '']) in call_args_list)
self.assertTrue(call(['ser3', 'service', '']) in call_args_list)
self.assertTrue(call(['dustyInternalNginx', '', '']) in call_args_list)
self.assertEquals(len(call_args_list), 7)
| {
"content_hash": "f41c259ebb5d26a8ea49bc193a8899a3",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 205,
"avg_line_length": 63.80582524271845,
"alnum_prop": 0.6367924528301887,
"repo_name": "gamechanger/dusty",
"id": "2cdbdc43a6f9fe1e2fbc0e42b941e72a584fc3cc",
"size": "6572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/commands/status_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "845"
},
{
"name": "JavaScript",
"bytes": "1675"
},
{
"name": "Python",
"bytes": "493669"
},
{
"name": "Ruby",
"bytes": "769"
},
{
"name": "Shell",
"bytes": "3875"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field pages_new on 'Response'
db.delete_table(db.shorten_name('responses_pages_new'))
def backwards(self, orm):
# Adding M2M table for field pages_new on 'Response'
m2m_table_name = db.shorten_name('responses_pages_new')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('response', models.ForeignKey(orm[u'survey.response'], null=False)),
('pages', models.ForeignKey(orm[u'survey.pages'], null=False))
))
db.create_unique(m2m_table_name, ['response_id', 'pages_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.culture': {
'Meta': {'ordering': "['culture']", 'object_name': 'Culture', 'db_table': "'cultures'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'coder': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'culture': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'fact': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Language']", 'symmetrical': 'False', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'})
},
u'core.language': {
'Meta': {'ordering': "['language']", 'unique_together': "(('isocode', 'language'),)", 'object_name': 'Language', 'db_table': "'languages'"},
'abvdcode': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'classification': ('django.db.models.fields.TextField', [], {}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isocode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3', 'db_index': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'core.section': {
'Meta': {'ordering': "['id']", 'object_name': 'Section', 'db_table': "'sections'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'})
},
u'core.source': {
'Meta': {'ordering': "['author', 'year']", 'unique_together': "(['author', 'year'],)", 'object_name': 'Source', 'db_table': "'sources'", 'index_together': "[['author', 'year']]"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reference': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}),
'year': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'survey.floatresponse': {
'Meta': {'object_name': 'FloatResponse', 'db_table': "'responses_floats'", '_ormbases': [u'survey.Response']},
'response': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'})
},
u'survey.integerresponse': {
'Meta': {'object_name': 'IntegerResponse', 'db_table': "'responses_integers'", '_ormbases': [u'survey.Response']},
'response': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'})
},
u'survey.optionquestion': {
'Meta': {'object_name': 'OptionQuestion', 'db_table': "'questions_option'", '_ormbases': [u'survey.Question']},
'options': ('django.db.models.fields.TextField', [], {}),
u'question_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Question']", 'unique': 'True', 'primary_key': 'True'})
},
u'survey.optionresponse': {
'Meta': {'object_name': 'OptionResponse', 'db_table': "'responses_options'", '_ormbases': [u'survey.Response']},
'response': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'}),
'response_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.pages': {
'Meta': {'object_name': 'Pages'},
'associatedS': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Source']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pageNum': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
u'survey.question': {
'Meta': {'object_name': 'Question', 'db_table': "'questions'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'information': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_survey.question_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'question': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'response_type': ('django.db.models.fields.CharField', [], {'default': "'Int'", 'max_length': '6'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Section']"})
},
u'survey.response': {
'Meta': {'unique_together': "(('question', 'culture'),)", 'object_name': 'Response', 'db_table': "'responses'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'codersnotes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'culture': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Culture']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pages': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_survey.response_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'source': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'sources_info'", 'symmetrical': 'False', 'to': u"orm['core.Source']"}),
'uncertainty': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'survey.textresponse': {
'Meta': {'object_name': 'TextResponse', 'db_table': "'responses_texts'", '_ormbases': [u'survey.Response']},
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['survey'] | {
"content_hash": "a0446913004b08065e5b9887c275507f",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 198,
"avg_line_length": 80.68484848484849,
"alnum_prop": 0.5532937730038309,
"repo_name": "shh-dlce/pulotu",
"id": "86ebb93c7902c7c1733955125bf49b198d614ab6",
"size": "13337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/apps/survey/migrations/0028_auto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56056"
},
{
"name": "HTML",
"bytes": "87074"
},
{
"name": "JavaScript",
"bytes": "348481"
},
{
"name": "Python",
"bytes": "1438334"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', 'apply.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'^', include('yard.urls')),
url(r'^admin/', include(admin.site.urls)),
]
| {
"content_hash": "25bcaa3f1da22c939ca683e26e531af4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 84,
"avg_line_length": 28.625,
"alnum_prop": 0.6397379912663755,
"repo_name": "neonsoftware/yard",
"id": "1a52f424989c72e33b359b4b87374d9cf76ad836",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old/apply/apply/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48649"
},
{
"name": "Go",
"bytes": "12504"
},
{
"name": "HTML",
"bytes": "778320"
},
{
"name": "JavaScript",
"bytes": "88318"
},
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "Nginx",
"bytes": "1001"
},
{
"name": "Python",
"bytes": "81102"
},
{
"name": "Shell",
"bytes": "7501"
}
],
"symlink_target": ""
} |
import unittest
from ..test import TestCase
from .multipart_builder import MultipartBuilder
class MockPlatform:
def create_url(self, url, add_server):
if add_server:
return 'http://example.com/' + url
return url
class TestMultipartBuilder(TestCase):
def test_add(self):
mb = MultipartBuilder(MockPlatform())
mb.set_body({'foo': 'bar'})
mb.add(('report.csv', 'some,data,to,send'))
req = mb.request('/foo')
self.assertEqual(mb.body(), {'foo': 'bar'})
self.assertEqual(mb.contents(), [('attachment', ('report.csv', 'some,data,to,send'))])
self.assertEqual(req.files, [
('json', ('request.json', '{"foo": "bar"}', 'application/json')),
('attachment', ('report.csv', 'some,data,to,send'))
])
def test_multipart_mixed(self):
mb = MultipartBuilder(MockPlatform())
mb.set_body({'foo': 'bar'})
mb.add(('report.csv', 'some,data,to,send'))
mb.set_multipart_mixed(True)
req = mb.request('/foo')
self.assertTrue('multipart/mixed' in req.headers['Content-Type'], True)
self.assertEqual(mb.body(), {'foo': 'bar'})
self.assertEqual(mb.contents(), [('attachment', ('report.csv', 'some,data,to,send'))])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "8aea626892b0659ee7144503de7c2a1b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 94,
"avg_line_length": 31.093023255813954,
"alnum_prop": 0.5804038893044129,
"repo_name": "grokify/ringcentral-python",
"id": "84207013588483d20972a938daac44fd0440ea02",
"size": "1378",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ringcentral/http/multipart_builder_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "871"
},
{
"name": "Python",
"bytes": "58805"
}
],
"symlink_target": ""
} |
import time
from lib.sensors import OctopusDeploySensor
class NewDeploymentSensor(OctopusDeploySensor):
def __init__(self, sensor_service, config=None, poll_interval=None):
super(NewDeploymentSensor, self).__init__(
sensor_service=sensor_service,
config=config,
poll_interval=poll_interval,
trigger_ref='octopusdeploy.new_deployment',
store_key='octopusdeploy.last_deploy_date_str')
self._logger = self._sensor_service.get_logger(__name__)
def poll(self):
self._logger.debug('Requesting list of deployments')
deployments = self._get_deployments()
# Make sure there are releases
if deployments is None:
self._logger.info('No deployments found')
return
if len(deployments) is 0:
self._logger.info('Empty list of deployments')
return
last_deployment = deployments[0]
last_assembled_date = self._to_date(last_deployment['assembled'])
# What is the last indexed release date? If none, index and exit
index_date = self._get_last_date()
self._logger.debug('Index date is %s' % index_date)
if index_date is None:
self._logger.info('Initializing index')
self._set_last_date(last_assembled_date)
index_date = self._get_last_date()
# If there have been new deployments, trigger them each
if last_assembled_date > index_date:
self._logger.info('Found deployments to trigger')
# Get deployments since the last update time
# They are in date order so once you get one behind the index
# break out of the loop
for deployment in deployments:
if self._to_date(deployment['assembled']) > index_date:
self._logger.info('Raising trigger for %s' % deployment['id'])
self._dispatch_trigger_for_payload(deployment)
else:
break
self._set_last_date(last_assembled_date)
else:
self._logger.debug('No new deployments comparing %s'
% time.strftime("%Y-%m-%dT%H:%M:%S",
last_assembled_date))
self._logger.debug('and %s'
% time.strftime("%Y-%m-%dT%H:%M:%S",
index_date))
def _get_deployments(self):
result = self.make_get_request("deployments")
releases = self._to_triggers(result['Items'])
return releases
def _get_last_deployment(self):
deployments = self._get_deployments()
if deployments is None:
return None
if len(deployments) is not 1:
return None
return deployments[0]
def _to_trigger(self, deployment):
trigger = {
'id': deployment['Id'],
'name': deployment['Name'],
'comments': deployment['Comments'],
'assembled': deployment['Created'],
'version': deployment['ReleaseId'],
'author': deployment['LastModifiedBy'],
'project_id': deployment['ProjectId']
}
return trigger
| {
"content_hash": "19a8bde924cfb2ff5188d096b447d464",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 82,
"avg_line_length": 38.65882352941176,
"alnum_prop": 0.5602556299452222,
"repo_name": "psychopenguin/st2contrib",
"id": "1572da0a85cf9b99e533b6ce7eba2cdbee6a31f7",
"size": "3286",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "packs/octopusdeploy/sensors/new_deployment_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "4592"
},
{
"name": "Python",
"bytes": "586383"
},
{
"name": "Shell",
"bytes": "15738"
}
],
"symlink_target": ""
} |
import collections.abc
from datetime import datetime
from math import ceil
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models.functions import Substr
from django.test import TestCase, skipUnlessDBFeature
from .models import (
Article, Author, Game, IsNullWithNoneAsRHS, Player, Season, Tag,
)
class LookupTests(TestCase):
@classmethod
def setUpTestData(cls):
# Create a few Authors.
cls.au1 = Author.objects.create(name='Author 1', alias='a1')
cls.au2 = Author.objects.create(name='Author 2', alias='a2')
# Create a few Articles.
cls.a1 = Article.objects.create(
headline='Article 1',
pub_date=datetime(2005, 7, 26),
author=cls.au1,
slug='a1',
)
cls.a2 = Article.objects.create(
headline='Article 2',
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug='a2',
)
cls.a3 = Article.objects.create(
headline='Article 3',
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug='a3',
)
cls.a4 = Article.objects.create(
headline='Article 4',
pub_date=datetime(2005, 7, 28),
author=cls.au1,
slug='a4',
)
cls.a5 = Article.objects.create(
headline='Article 5',
pub_date=datetime(2005, 8, 1, 9, 0),
author=cls.au2,
slug='a5',
)
cls.a6 = Article.objects.create(
headline='Article 6',
pub_date=datetime(2005, 8, 1, 8, 0),
author=cls.au2,
slug='a6',
)
cls.a7 = Article.objects.create(
headline='Article 7',
pub_date=datetime(2005, 7, 27),
author=cls.au2,
slug='a7',
)
# Create a few Tags.
cls.t1 = Tag.objects.create(name='Tag 1')
cls.t1.articles.add(cls.a1, cls.a2, cls.a3)
cls.t2 = Tag.objects.create(name='Tag 2')
cls.t2.articles.add(cls.a3, cls.a4, cls.a5)
cls.t3 = Tag.objects.create(name='Tag 3')
cls.t3.articles.add(cls.a5, cls.a6, cls.a7)
def test_exists(self):
# We can use .exists() to check that there are some
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertQuerysetEqual(
Article.objects.filter(pub_date__startswith='2005'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.abc.Iterator)
self.assertQuerysetEqual(
Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline')
)
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(
Article.objects.in_bulk(),
{
self.a1.id: self.a1,
self.a2.id: self.a2,
self.a3.id: self.a3,
self.a4.id: self.a4,
self.a5.id: self.a5,
self.a6.id: self.a6,
self.a7.id: self.a7,
}
)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
with self.assertRaises(TypeError):
Article.objects.in_bulk(headline__startswith='Blah')
def test_in_bulk_lots_of_ids(self):
test_range = 2000
max_query_params = connection.features.max_query_params
expected_num_queries = ceil(test_range / max_query_params) if max_query_params else 1
Author.objects.bulk_create([Author() for i in range(test_range - Author.objects.count())])
authors = {author.pk: author for author in Author.objects.all()}
with self.assertNumQueries(expected_num_queries):
self.assertEqual(Author.objects.in_bulk(authors), authors)
def test_in_bulk_with_field(self):
self.assertEqual(
Article.objects.in_bulk([self.a1.slug, self.a2.slug, self.a3.slug], field_name='slug'),
{
self.a1.slug: self.a1,
self.a2.slug: self.a2,
self.a3.slug: self.a3,
}
)
def test_in_bulk_non_unique_field(self):
msg = "in_bulk()'s field_name must be a unique field but 'author' isn't."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.in_bulk([self.au1], field_name='author')
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
self.assertSequenceEqual(
Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
)
self.assertSequenceEqual(
Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertSequenceEqual(
list(Article.objects.values('id', 'headline').iterator()),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
)
# The values() method works with "extra" fields specified in extra(select).
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertSequenceEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}],
)
# You can specify fields from forward and reverse relations, just like filter().
self.assertSequenceEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
],
)
self.assertSequenceEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
],
)
self.assertSequenceEqual(
(
Author.objects
.values('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
],
)
# However, an exception FieldDoesNotExist will be thrown if you specify
# a nonexistent field name in values() (a field that is neither in the
# model nor in extra(select)).
msg = (
"Cannot resolve keyword 'id_plus_two' into field. Choices are: "
"author, author_id, headline, id, id_plus_one, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_two')
# If you don't specify field names to values(), all are returned.
self.assertSequenceEqual(
Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0),
'slug': 'a5',
}],
)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
self.assertSequenceEqual(
Article.objects.values_list('headline'),
[
('Article 5',),
('Article 6',),
('Article 4',),
('Article 2',),
('Article 3',),
('Article 7',),
('Article 1',),
],
)
self.assertSequenceEqual(
Article.objects.values_list('id').order_by('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
)
self.assertSequenceEqual(
Article.objects.values_list('id', flat=True).order_by('id'),
[self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
)
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
)
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id_plus_one', 'id'),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id)
],
)
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id', 'id_plus_one'),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1)
],
)
args = ('name', 'article__headline', 'article__tag__name')
self.assertSequenceEqual(
Author.objects.values_list(*args).order_by(*args),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
],
)
with self.assertRaises(TypeError):
Article.objects.values_list('id', 'headline', flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_next_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')), '<Article: Article 6>')
self.assertEqual(repr(self.a3.get_next_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a4.get_next_by_pub_date()), '<Article: Article 6>')
with self.assertRaises(Article.DoesNotExist):
self.a5.get_next_by_pub_date()
self.assertEqual(repr(self.a6.get_next_by_pub_date()), '<Article: Article 5>')
self.assertEqual(repr(self.a7.get_next_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a7.get_previous_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a6.get_previous_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a5.get_previous_by_pub_date()), '<Article: Article 6>')
self.assertEqual(repr(self.a4.get_previous_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a3.get_previous_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_previous_by_pub_date()), '<Article: Article 1>')
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in the
# underlying SQL code, but Django handles the quoting of them automatically.
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article_'),
['<Article: Article_ with underscore>']
)
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article%'),
['<Article: Article% with percent sign>']
)
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='\\'),
[r'<Article: Article with \ backslash>']
)
def test_exclude(self):
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
# exclude() is the opposite of filter() when doing lookups:
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.exclude(headline__startswith="Article_"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.exclude(headline="Article 7"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 1>',
]
)
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual(Article.objects.none().iterator(), [])
def test_in(self):
# using __in with an empty list should return an empty query set
self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
self.assertQuerysetEqual(
Article.objects.exclude(id__in=[]),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
def test_in_different_database(self):
with self.assertRaisesMessage(
ValueError,
"Subqueries aren't allowed across different databases. Force the "
"inner query to be evaluated using `list(inner_query)`."
):
list(Article.objects.filter(id__in=Article.objects.using('other').all()))
def test_in_keeps_value_ordering(self):
query = Article.objects.filter(slug__in=['a%d' % i for i in range(1, 8)]).values('pk').query
self.assertIn(' IN (a1, a2, a3, a4, a5, a6, a7) ', str(query))
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
with self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'pub_date_year' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag"
):
Article.objects.filter(pub_date_year='2005').count()
def test_unsupported_lookups(self):
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'starts' for CharField or join on the field "
"not permitted, perhaps you meant startswith or istartswith?"
):
Article.objects.filter(headline__starts='Article')
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'is_null' for DateTimeField or join on the field "
"not permitted, perhaps you meant isnull?"
):
Article.objects.filter(pub_date__is_null=True)
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'gobbledygook' for DateTimeField or join on the field "
"not permitted."
):
Article.objects.filter(pub_date__gobbledygook='blahblah')
def test_relation_nested_lookup_error(self):
# An invalid nested lookup on a related field raises a useful error.
msg = 'Related Field got invalid lookup: editor'
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(author__editor__name='James')
msg = 'Related Field got invalid lookup: foo'
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(articles__foo='bar')
def test_regex(self):
# Create some articles with a bit more interesting headlines for testing field lookups:
for a in Article.objects.all():
a.delete()
now = datetime.now()
Article.objects.create(pub_date=now, headline='f')
Article.objects.create(pub_date=now, headline='fo')
Article.objects.create(pub_date=now, headline='foo')
Article.objects.create(pub_date=now, headline='fooo')
Article.objects.create(pub_date=now, headline='hey-Foo')
Article.objects.create(pub_date=now, headline='bar')
Article.objects.create(pub_date=now, headline='AbBa')
Article.objects.create(pub_date=now, headline='baz')
Article.objects.create(pub_date=now, headline='baxZ')
# zero-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo*'),
['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>']
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'fo*'),
[
'<Article: f>',
'<Article: fo>',
'<Article: foo>',
'<Article: fooo>',
'<Article: hey-Foo>',
]
)
# one-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo+'),
['<Article: fo>', '<Article: foo>', '<Article: fooo>']
)
# wildcard
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fooo?'),
['<Article: foo>', '<Article: fooo>']
)
# leading anchor
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^b'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
)
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'), ['<Article: AbBa>'])
# trailing anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'), ['<Article: baz>'])
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'z$'),
['<Article: baxZ>', '<Article: baz>']
)
# character sets
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'ba[rz]'),
['<Article: bar>', '<Article: baz>']
)
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'), ['<Article: baxZ>'])
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'ba[RxZ]'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
)
# and more articles:
Article.objects.create(pub_date=now, headline='foobar')
Article.objects.create(pub_date=now, headline='foobaz')
Article.objects.create(pub_date=now, headline='ooF')
Article.objects.create(pub_date=now, headline='foobarbaz')
Article.objects.create(pub_date=now, headline='zoocarfaz')
Article.objects.create(pub_date=now, headline='barfoobaz')
Article.objects.create(pub_date=now, headline='bazbaRFOO')
# alternation
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
'<Article: ooF>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^foo(f|b)'),
['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>']
)
# greedy matching
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b.*az'),
[
'<Article: barfoobaz>',
'<Article: baz>',
'<Article: bazbaRFOO>',
'<Article: foobarbaz>',
'<Article: foobaz>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'b.*ar'),
[
'<Article: bar>',
'<Article: barfoobaz>',
'<Article: bazbaRFOO>',
'<Article: foobar>',
'<Article: foobarbaz>',
]
)
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
Article.objects.create(pub_date=now, headline='foobar')
Article.objects.create(pub_date=now, headline='foobaz')
Article.objects.create(pub_date=now, headline='ooF')
Article.objects.create(pub_date=now, headline='foobarbaz')
Article.objects.create(pub_date=now, headline='zoocarfaz')
Article.objects.create(pub_date=now, headline='barfoobaz')
Article.objects.create(pub_date=now, headline='bazbaRFOO')
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b(.).*b\1'),
['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>']
)
def test_regex_null(self):
"""
A regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
def test_regex_non_string(self):
"""
A regex lookup does not fail on non-string fields
"""
Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'), ['<Season: 2013>'])
def test_regex_non_ascii(self):
"""
A regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name='\u2660')
Player.objects.get(name__regex='\u2660')
def test_nonfield_lookups(self):
"""
A lookup query containing non-fields raises the proper exception.
"""
msg = "Unsupported lookup 'blahblah' for CharField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah=99)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah__exact=99)
msg = (
"Cannot resolve keyword 'blahblah' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Genuine field names don't collide with built-in lookup types
('year', 'gt', 'range', 'in' etc.) (#11670).
"""
# 'gt' is used as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010]))
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games.set(Game.objects.filter(season__year=2009))
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011]))
johnson = Player.objects.create(name="Johnson")
johnson.games.set(Game.objects.filter(season__year__in=[2011]))
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
# Players who played in 2010
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
# Players who played in 2011
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
def test_chain_date_time_lookups(self):
self.assertQuerysetEqual(
Article.objects.filter(pub_date__month__gt=7),
['<Article: Article 5>', '<Article: Article 6>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__day__gte=27),
['<Article: Article 2>', '<Article: Article 3>',
'<Article: Article 4>', '<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__hour__lt=8),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__minute__lte=0),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 5>', '<Article: Article 6>',
'<Article: Article 7>'],
ordered=False
)
def test_exact_none_transform(self):
"""Transforms are used for __exact=None."""
Season.objects.create(year=1, nulled_text_field='not null')
self.assertFalse(Season.objects.filter(nulled_text_field__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__exact=None))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled=None))
def test_exact_sliced_queryset_limit_one(self):
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[:1]),
[self.a1, self.a2, self.a3, self.a4]
)
def test_exact_sliced_queryset_limit_one_offset(self):
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[1:2]),
[self.a5, self.a6, self.a7]
)
def test_exact_sliced_queryset_not_limited_to_one(self):
msg = (
'The QuerySet value for an exact lookup must be limited to one '
'result using slicing.'
)
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[:2]))
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[1:]))
def test_custom_field_none_rhs(self):
"""
__exact=value is transformed to __isnull=True if Field.get_prep_value()
converts value to None.
"""
season = Season.objects.create(year=2012, nulled_text_field=None)
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull=True))
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field=''))
def test_pattern_lookups_with_substr(self):
a = Author.objects.create(name='John Smith', alias='Johx')
b = Author.objects.create(name='Rhonda Simpson', alias='sonx')
tests = (
('startswith', [a]),
('istartswith', [a]),
('contains', [a, b]),
('icontains', [a, b]),
('endswith', [b]),
('iendswith', [b]),
)
for lookup, result in tests:
with self.subTest(lookup=lookup):
authors = Author.objects.filter(**{'name__%s' % lookup: Substr('alias', 1, 3)})
self.assertCountEqual(authors, result)
def test_custom_lookup_none_rhs(self):
"""Lookup.can_use_none_as_rhs=True allows None as a lookup value."""
season = Season.objects.create(year=2012, nulled_text_field=None)
query = Season.objects.get_queryset().query
field = query.model._meta.get_field('nulled_text_field')
self.assertIsInstance(query.build_lookup(['isnull_none_rhs'], field, None), IsNullWithNoneAsRHS)
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull_none_rhs=True))
| {
"content_hash": "60e8a9d683395c60d6cb7d57e8250ecf",
"timestamp": "",
"source": "github",
"line_count": 927,
"max_line_length": 118,
"avg_line_length": 45.678532901833876,
"alnum_prop": 0.5614254675987153,
"repo_name": "charettes/django",
"id": "666fadf262f53dc823649755bd7c308c636e0289",
"size": "42344",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/lookup/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85024"
},
{
"name": "HTML",
"bytes": "224332"
},
{
"name": "JavaScript",
"bytes": "251339"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13018015"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally import consts
from rally import osclients
from rally.plugins.openstack.cleanup import manager as resource_manager
from rally.plugins.openstack.cleanup import resources as res_cleanup
from rally.plugins.openstack.scenarios.sahara import utils
from rally.plugins.openstack.scenarios.swift import utils as swift_utils
from rally.task import context
LOG = logging.getLogger(__name__)
@context.configure(name="sahara_output_data_sources", order=444)
class SaharaOutputDataSources(context.Context):
"""Context class for setting up Output Data Sources for an EDP job."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"output_type": {
"enum": ["swift", "hdfs"],
},
"output_url_prefix": {
"type": "string",
}
},
"additionalProperties": False,
"required": ["output_type", "output_url_prefix"]
}
@logging.log_task_wrapper(LOG.info,
_("Enter context: `Sahara Output Data Sources`"))
def setup(self):
utils.init_sahara_context(self)
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
clients = osclients.Clients(user["credential"])
sahara = clients.sahara()
if self.config["output_type"] == "swift":
swift = swift_utils.SwiftScenario(clients=clients,
context=self.context)
container_name = self.generate_random_name()
self.context["tenants"][tenant_id]["sahara"]["container"] = {
"name": swift._create_container(
container_name=container_name),
"output_swift_objects": []
}
self.setup_outputs_swift(swift, sahara, tenant_id,
container_name,
user["credential"].username,
user["credential"].password)
else:
self.setup_outputs_hdfs(sahara, tenant_id,
self.config["output_url_prefix"])
def setup_outputs_hdfs(self, sahara, tenant_id, output_url):
output_ds = sahara.data_sources.create(
name=self.generate_random_name(),
description="",
data_source_type="hdfs",
url=output_url)
self.context["tenants"][tenant_id]["sahara"]["output"] = output_ds.id
def setup_outputs_swift(self, swift, sahara, tenant_id, container_name,
username, password):
output_ds_swift = sahara.data_sources.create(
name=self.generate_random_name(),
description="",
data_source_type="swift",
url="swift://" + container_name + ".sahara/",
credential_user=username,
credential_pass=password)
self.context["tenants"][tenant_id]["sahara"]["output"] = (
output_ds_swift.id
)
@logging.log_task_wrapper(LOG.info,
_("Exit context: `Sahara Output Data Sources`"))
def cleanup(self):
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
if self.context["tenants"][tenant_id].get(
"sahara", {}).get("container", {}).get("name") is not None:
for swift_object in (
self.context["tenants"][tenant_id]["sahara"]["container"][
"output_swift_objects"]):
res_cleanup.SwiftObject(swift_object[1])
res_cleanup.SwiftContainer(
self.context["tenants"][tenant_id].get(
"sahara", {}).get("container", {}).get("name"))
resources = ["data_sources"]
resource_manager.cleanup(
names=["sahara.%s" % res for res in resources],
users=self.context.get("users", []))
| {
"content_hash": "ed2268d9a4aedb4df44f407738324c34",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 41.19607843137255,
"alnum_prop": 0.5437886720609234,
"repo_name": "eonpatapon/rally",
"id": "dd288650a51320d6e45cea6f0b6b59962c5b10c0",
"size": "4800",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "rally/plugins/openstack/context/sahara/sahara_output_data_sources.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2566072"
},
{
"name": "Shell",
"bytes": "43361"
}
],
"symlink_target": ""
} |
"""
Created on Mon Feb 15 11:28:53 2016
@author: ih3
Wave class.
Core functions and classes that solve across a wave, or wave section.
"""
from __future__ import division
import numpy
from scipy.optimize import brentq
from scipy.integrate import odeint
from copy import deepcopy
from .state import State
def rarefaction_dwdp(w, p, q_known, wavenumber):
r"""
There is a tricky point here that needs investigation. If
the input p is used here, rather than local_state.p, then they
can diverge (when :math:`v_t` is significant) leading to overflows of g. By
using local_state we avoid the overflow, but it may mean the final
state is not very accurate.
Parameters
----------
w : tuple
primitive state (rho, v, eps)
p : scalar
pressure (required by odeint, but not used: see note above)
q_known : State
Known state
wavenumber : scalar
Wave number
"""
lr_sign = wavenumber - 1
dwdp = numpy.zeros_like(w)
rho, v, eps = w
vt = q_known.vt_from_known(rho, v, eps)
local_state = State(rho, v, vt, eps, q_known.eos)
cs = local_state.cs
h = local_state.h
W_lorentz = local_state.W_lorentz
xi = local_state.wavespeed(wavenumber)
# g quantifies the effect of tangential velocities: see the Living Review
# and original Pons et al paper for details.
g = vt**2 * (xi**2 - 1.0) / (1.0 - xi * v)**2
dwdp[0] = 1.0 / (h * cs**2)
dwdp[1] = lr_sign / (rho * h * W_lorentz**2 * cs) / numpy.sqrt(1.0 + g)
dwdp[2] = local_state.p / (rho**2 * h * cs**2)
return dwdp
def mass_flux_squared(q_start, p_end, unknown_eos=None):
r"""
Calculates the square of the mass flux through a region, given the state at
the start of the region and the pressure at the end.
Parameters
----------
q_start : State
State at start of the region
p_end : scalar
Pressure at the end of the region
unknown_eos : dictionary, optional
Equation of state in the region (provided if different from EoS
of q_start)
"""
if unknown_eos is None:
unknown_eos = q_start.eos
def shock_root_rho(rho):
h = unknown_eos['h_from_rho_p'](rho, p_end)
return (h**2 - q_start.h**2) - \
(h/rho + q_start.h/q_start.rho) * (p_end - q_start.p)
if p_end >= q_start.p:
# Shock
min_rho = q_start.rho
shock_root_min = shock_root_rho(min_rho)
max_rho = numpy.sqrt(p_end/q_start.p) * q_start.rho
shock_root_max = shock_root_rho(max_rho)
while(shock_root_min * shock_root_max > 0.0):
min_rho /= 1.001 # Not sure - could end up with unphysical root?
max_rho *= 10.0
shock_root_min = shock_root_rho(min_rho)
shock_root_max = shock_root_rho(max_rho)
else:
# Deflagration
max_rho = q_start.rho
shock_root_max = shock_root_rho(max_rho)
min_rho = numpy.sqrt(p_end/q_start.p) * q_start.rho
shock_root_min = shock_root_rho(min_rho)
while(shock_root_min * shock_root_max > 0.0):
min_rho /= 10.0 # Not sure - could end up with unphysical root?
max_rho *= 1.001
shock_root_min = shock_root_rho(min_rho)
shock_root_max = shock_root_rho(max_rho)
rho = brentq(shock_root_rho, min_rho, max_rho)
h = unknown_eos['h_from_rho_p'](rho, p_end)
eps = h - 1.0 - p_end / rho
dp = p_end - q_start.p
dh2 = h**2 - q_start.h**2
j2 = -dp / (dh2 / dp - 2.0 * q_start.h / q_start.rho)
return j2, rho, eps, dp
def deflagration_root(p_0_star, q_precursor, unknown_eos, wavenumber, label):
"""
Find the CJ deflagration.
Find the limiting case between stable and unstable deflagrations. Can also
be used for the detonation case.
Parameters
----------
p_0_star : double
Pressure to match to.
q_precursor : State
Known State
unknown_eos : EOS
Equation of State after the reaction has taken place
wavenumber : int
Indicates if this is the left (0), central (1) or right (2) wave
label : string
Optional label of the resulting State
Returns
-------
residual : double
Residual function to be minimized
"""
lr_sign = wavenumber - 1
j2, rho, eps, dp = mass_flux_squared(q_precursor, p_0_star, unknown_eos)
if j2 < 0:
return 10.0 # Unphysical part of Crussard curve, return a random number
j = numpy.sqrt(j2)
v_deflagration = (q_precursor.rho**2 *
q_precursor.W_lorentz**2 * q_precursor.v + \
lr_sign * j**2 * \
numpy.sqrt(1.0 + q_precursor.rho**2 *
q_precursor.W_lorentz**2 *
(1.0 - q_precursor.v**2) / j**2)) / \
(q_precursor.rho**2 * q_precursor.W_lorentz**2 + j**2)
W_lorentz_deflagration = 1.0 / numpy.sqrt(1.0 - v_deflagration**2)
v = (q_precursor.h * q_precursor.W_lorentz *
q_precursor.v + lr_sign * dp *
W_lorentz_deflagration / j) / \
(q_precursor.h * q_precursor.W_lorentz + dp * (1.0 /
q_precursor.rho / q_precursor.W_lorentz + \
lr_sign * q_precursor.v *
W_lorentz_deflagration / j))
vt = q_precursor.vt_from_known(rho, v, eps)
q_unknown = State(rho, v, vt, eps, unknown_eos, label)
return q_unknown.wavespeed(wavenumber) - v_deflagration
def precursor_root(p_0_star, q_known, wavenumber):
"""
Find the precursor shock.
For a detonation, the temperature needs to be raised across a shock for the
reaction to take place.
Parameters
----------
p_0_star : double
Pressure to match to.
q_known : State
Known State
wavenumber : int
Indicates if this is the left (0), central (1) or right (2) wave
Returns
-------
residual : double
Residual function to be minimized
"""
shock = Shock(q_known, p_0_star, wavenumber)
q_precursor = shock.q_end
t_precursor = q_precursor.eos['t_from_rho_eps'](
q_precursor.rho, q_precursor.eps)
t_i = q_precursor.eos['t_ignition'](q_precursor.rho, q_precursor.eps)
return t_precursor - t_i
# NOTE: all subclasses begin with initialising type, name, wavenumber etc.
# Can avoid some repeated code by passing these as arguments to
# superclass constructer and calling that.
# NOTE: To avoid more repeated code: wave speed calculation appears to
# consist of one or two main parts - a shock wave bit and a burning
# wave bit. The code for these two sections is almost identical for
# all subclasses of WaveSection.
# Could therefore make define functions calculate_shock_speed and
# calculate_burning_speed for WaveSection class, which are then
# called by its subclasses
def post_discontinuity_state(p_star, q_start, lr_sign, label, j2, rho, eps, dp,
eos_end = None):
"""
Give the state across a discontinuity.
This code is common to all discontinuities.
Parameters
----------
p_star : double
Post-discontinuity pressure
q_start : State
Known State
lr_sign : int
-1 for a left going wave, +1 for a right going wave
label : string
Optional label of the post-shock State
j2 : double
Square of the mass flux across the wave
rho : double
Post-discontinuity density
eps : double
Post-discontinuity specific internal energy
dp : double
Jump in pressure across the discontinuity
eos_end : EOS
Equation of State on the other side of the discontinuity, if different
Returns
-------
v_shock : double
Shock speed
q_end : State
State on the other side of the discontinuity.
"""
if eos_end is None:
eos_end = q_start.eos
j = numpy.sqrt(j2)
v_shock = (q_start.rho**2 * q_start.W_lorentz**2 * q_start.v + \
lr_sign * j**2 * \
numpy.sqrt(1.0 + q_start.rho**2 * q_start.W_lorentz**2 * (1.0 - q_start.v**2) / j**2)) / \
(q_start.rho**2 * q_start.W_lorentz**2 + j**2)
W_lorentz_shock = 1.0 / numpy.sqrt(1.0 - v_shock**2)
v = (q_start.h * q_start.W_lorentz * q_start.v + lr_sign * dp * W_lorentz_shock / j) / \
(q_start.h * q_start.W_lorentz + dp * (1.0 / q_start.rho / q_start.W_lorentz + \
lr_sign * q_start.v * W_lorentz_shock / j))
vt = q_start.vt_from_known(rho, v, eps)
q_end = State(rho, v, vt, eps, eos_end, label=label)
return v_shock, q_end
class WaveSection(object):
"""
A wave section is a single type of solution where the State varies.
Parameters
----------
q_start : State
The known state on one side of the wave
p_end : scalar
Pressure in the region of unknown state
wavenumber : scalar
Characterises direction of travel of wave
Attributes
----------
wavenumber : int
Indicates if this is the left (0), central (1) or right (2) wave
wavespeed : list of doubles
Speed of the start and end of the WaveSection
name : string
LaTeX string giving the name of the WaveSection
q_start : State
State at the start of the WaveSection
q_end : State
State at the end of the WaveSection
trivial : boolean
True if the State does not change across the WaveSection
"""
def __init__(self, q_start, p_end, wavenumber):
"""
A part of a wave. For a single shock or rarefaction, this will be the
complete wave. For a deflagration or detonation, it may form part of
the full wave.
"""
# NOTE: what does self.trivial mean?
self.trivial = False
assert(wavenumber in [0, 1, 2]), "wavenumber must be 0, 1, 2"
self.wavenumber = wavenumber
self.name = None
self.q_start = None
self.q_end = None
self.wavespeed = []
self.type = ""
def latex_string(self):
"""
Text description of the WaveSection
Returns
-------
s : string
Description of the WaveSection; types and direction, plus speeds.
"""
if self.trivial:
return ""
else:
s = deepcopy(self.name)
s += r": \lambda^{{({})}}".format(self.wavenumber)
if len(self.wavespeed) > 1:
s += r"\in [{:.4f}, {:.4f}]".format(self.wavespeed[0],
self.wavespeed[-1])
else:
s += r"= {:.4f}".format(self.wavespeed[0])
return s
def _repr_latex_(self):
s = r"\begin{equation}" + self.latex_string() + r"\end{equation}"
return s
def __repr__(self):
return self.type
def plotting_data(self):
r"""
Returns data across the wave section for plotting.
Returns
-------
xi : numpy array of double
Characteristic coordinate of data
data : numpy array of double
Data (:math:`\rho, v_x, v_t, \epsilon, p, W, h, c_s`) at each point
"""
if self.trivial:
data = numpy.zeros((0,8))
xi = numpy.zeros((0,))
else:
data = numpy.vstack((self.q_start.state(), self.q_end.state()))
xi = numpy.array([self.wavespeed[0], self.wavespeed[0]])
return xi, data
# NOTE: this class has a different signature to all other subclasses of
# WaveSection (q_end rather than p_end). Might be more consistent
# to use the same signature for all subclasses - all could
# take argument q_end and access variable q_end.p.
class Contact(WaveSection):
"""
A linear discontinuity. This will always be the central wave (wavenumber=1)
for the hydrodynamic case.
"""
def __init__(self, q_start, q_end, wavenumber):
self.trivial = False
assert(wavenumber in [1]), "wavenumber for a Contact must be 1"
self.type = "Contact"
self.wavenumber = wavenumber
self.q_start = deepcopy(q_start)
self.q_end = deepcopy(q_end)
self.name = r"{\cal C}"
self.wavespeed = [q_start.v]
if numpy.allclose(q_start.state(), q_end.state()):
self.trivial = True
self.name = ""
assert(numpy.allclose(q_start.v, q_end.v)), "Velocities of states "\
"must match for a contact"
assert(numpy.allclose(q_start.p, q_end.p)), "Pressures of states "\
"must match for a contact"
assert(numpy.allclose(q_start.wavespeed(wavenumber),
q_end.wavespeed(wavenumber))), "Wavespeeds of "\
"states must match for a contact"
class Rarefaction(WaveSection):
"""
A continuous wave section across which pressure decreases.
"""
def __init__(self, q_start, p_end, wavenumber):
self.trivial = False
assert(wavenumber in [0, 2]), "wavenumber for a Rarefaction "\
"must be in 0, 2"
assert(q_start.p >= p_end), "For a rarefaction, p_start >= p_end"
self.type = "Rarefaction"
self.wavenumber = wavenumber
self.q_start = deepcopy(q_start)
self.name = r"{\cal R}"
if self.wavenumber == 0:
label = r"\star_L"
self.name += r"_{\leftarrow}"
else:
label = r"\star_R"
self.name += r"_{\rightarrow}"
v_known = q_start.wavespeed(self.wavenumber)
self.wavespeed = []
if numpy.allclose(q_start.p, p_end):
self.trivial = True
self.q_end = State(q_start.rho, q_start.v, q_start.vt, q_start.eps,
q_start.eos, label=label)
v_unknown = v_known
self.name = ""
else:
w_all = odeint(rarefaction_dwdp,
numpy.array([q_start.rho, q_start.v, q_start.eps]),
[q_start.p, p_end], rtol = 1e-12, atol = 1e-10,
args=((q_start, self.wavenumber)))
self.q_end = State(w_all[-1, 0], w_all[-1, 1],
q_start.vt_from_known(w_all[-1, 0], w_all[-1, 1], w_all[-1, 2]),
w_all[-1, 2], q_start.eos, label=label)
v_unknown = self.q_end.wavespeed(self.wavenumber)
if self.wavenumber == 0:
self.wavespeed = numpy.array([v_known, v_unknown])
else:
self.wavespeed = numpy.array([v_unknown, v_known])
def plotting_data(self):
# TODO: make the number of points in the rarefaction plot a parameter
if self.trivial:
xi = numpy.zeros((0,))
data = numpy.zeros((0,8))
else:
p = numpy.linspace(self.q_start.p, self.q_end.p, 500)
w_all = odeint(rarefaction_dwdp,
numpy.array([self.q_start.rho,
self.q_start.v, self.q_start.eps]),
p, rtol = 1e-12, atol = 1e-10,
args=(self.q_start, self.wavenumber))
data = numpy.zeros((len(p),8))
xi = numpy.zeros_like(p)
for i in range(len(p)):
state = State(w_all[i,0], w_all[i,1],
self.q_start.vt_from_known(w_all[i,0], w_all[i,1], w_all[i,2]),
w_all[i, 2], self.q_start.eos)
xi[i] = state.wavespeed(self.wavenumber)
data[i,:] = state.state()
return xi, data
class Shock(WaveSection):
"""
A discontinuous wave section across which pressure increases.
"""
def __init__(self, q_start, p_end, wavenumber):
self.trivial = False
assert(wavenumber in [0, 2]), "wavenumber for a Shock "\
"must be in 0, 2"
# As we use the Shock code for deflagration checks, we can't apply
# this check
#assert(q_start.p <= p_end), "For a shock, p_start <= p_end"
self.type = "Shock"
self.wavenumber = wavenumber
lr_sign = self.wavenumber - 1
self.q_start = deepcopy(q_start)
self.name = r"{\cal S}"
if self.wavenumber == 0:
label = r"\star_L"
self.name += r"_{\leftarrow}"
else:
label = r"\star_R"
self.name += r"_{\rightarrow}"
if numpy.allclose(q_start.p, p_end):
self.trivial = True
self.q_end = State(q_start.rho, q_start.v, q_start.vt, q_start.eps,
q_start.eos, label=label)
v_shock = q_start.wavespeed(self.wavenumber)
self.name = ""
else:
j2, rho, eps, dp = mass_flux_squared(q_start, p_end,
q_start.eos)
v_shock, self.q_end = post_discontinuity_state(p_end,
q_start, lr_sign, label, j2, rho, eps, dp)
self.wavespeed = [v_shock]
# TODO: Check that q is correctly initialized across each wave in det, defl.
class Deflagration(WaveSection):
"""
A discontinuous wave section across which pressure decreases and a reaction
takes place.
"""
def __init__(self, q_start, p_end, wavenumber):
eos_end = q_start.eos['eos_inert']
t_i = q_start.eos['t_ignition'](q_start.rho, q_start.eps)
self.trivial = False
assert(wavenumber in [0, 2]), "wavenumber for a Deflagration "\
"must be in 0, 2"
assert(q_start.p >= p_end), "For a deflagration, p_start >= p_end"
# t_start = q_start.eos['t_from_rho_eps'](q_start.rho, q_start.eps)
# assert(t_start >= t_i), "For a deflagration, temperature of start "\
# "state must be at least the ignition temperature"
# TODO The above check should be true, but the root-find sometimes just
# misses. numpy allclose type check?
self.type = "Deflagration"
self.wavenumber = wavenumber
lr_sign = self.wavenumber - 1
self.q_start = deepcopy(q_start)
self.name = r"{\cal WDF}"
if self.wavenumber == 0:
label = r"\star_L"
self.name += r"_{\leftarrow}"
else:
label = r"\star_R"
self.name += r"_{\rightarrow}"
v_known = q_start.wavespeed(self.wavenumber)
if numpy.allclose(q_start.p, p_end):
self.trivial = True
self.q_end = State(q_start.rho, q_start.v, q_start.vt, q_start.eps,
eos_end, label=label)
v_deflagration = v_known
self.name = ""
else:
# This is a single deflagration, so the start state must be at the
# reaction temperature already.
j2, rho, eps, dp = mass_flux_squared(q_start, p_end, eos_end)
v_deflagration, q_unknown = post_discontinuity_state(
p_end, q_start, lr_sign, label, j2, rho, eps, dp,
eos_end)
# If the speed in the unknown state means the characteristics are
# not going into the deflagration, then this is an unstable strong
# deflagration
if (lr_sign*(q_unknown.wavespeed(self.wavenumber) - v_deflagration) < 0):
p_cjdf = brentq(deflagration_root, (1.0+1e-9)*p_end,
(1.0-1e-9)*q_start.p,
args=(q_start, eos_end, self.wavenumber, label))
j2, rho, eps, dp = mass_flux_squared(q_start, p_cjdf, eos_end)
v_deflagration, q_unknown = post_discontinuity_state(
p_cjdf, q_start, lr_sign, label, j2, rho, eps,
dp, eos_end)
self.name = r"{\cal CJDF}"
if self.wavenumber == 0:
label = r"\star_L"
self.name += r"_{\leftarrow}"
else:
label = r"\star_R"
self.name += r"_{\rightarrow}"
self.q_end = deepcopy(q_unknown)
if q_start.cs - self.q_end.cs < 0:
raise UnphysicalSolution("There is no physical solution")
self.wavespeed = [v_deflagration]
class Detonation(WaveSection):
"""
A discontinuous wave section across which pressure increases and a reaction
takes place.
"""
def __init__(self, q_start, p_end, wavenumber):
eos_end = q_start.eos['eos_inert']
t_i = q_start.eos['t_ignition'](q_start.rho, q_start.eps)
self.trivial = False
assert(wavenumber in [0, 2]), "wavenumber for a Detonation "\
"must be in 0, 2"
assert(q_start.p <= p_end), "For a detonation, p_start <= p_end"
#t_start = q_start.eos['t_from_rho_eps'](q_start.rho, q_start.eps)
#assert(t_start >= t_i), "For a detonation, temperature of start "\
#"state must be at least the ignition temperature"
self.type = "Detonation"
self.wavenumber = wavenumber
lr_sign = self.wavenumber - 1
self.q_start = deepcopy(q_start)
self.name = r"{\cal SDT}"
if self.wavenumber == 0:
label = r"\star_L"
self.name += r"_{\leftarrow}"
else:
label = r"\star_R"
self.name += r"_{\rightarrow}"
v_known = q_start.wavespeed(self.wavenumber)
if numpy.allclose(q_start.p, p_end):
self.trivial = True
self.q_end = State(q_start.rho, q_start.v, q_start.vt, q_start.eps,
eos_end, label=label)
v_detonation = v_known
self.name = ""
else:
# This is a single detonation, so the start state must be at the
# reaction temperature already.
j2, rho, eps, dp = mass_flux_squared(q_start, p_end, eos_end)
if j2 < 0:
# The single detonation is unphysical - must be unstable weak
# detonation. So skip the calculation and make sure the CJ
# calculation runs
# print("Should be a CJ detonation")
q_unknown = deepcopy(q_start)
v_detonation = q_unknown.wavespeed(self.wavenumber) + lr_sign
else:
v_detonation, q_unknown = post_discontinuity_state(
p_end, q_start, lr_sign, label, j2,
rho, eps, dp, eos_end)
# If the speed in the unknown state means the characteristics are
# not going into the detonation, then this is an unstable weak
# detonation
if (lr_sign*(q_unknown.wavespeed(self.wavenumber) - v_detonation) < 0):
pmin = (1.0+1e-9)*min(q_start.p, p_end)
pmax = max(q_start.p, p_end)
fmin = deflagration_root(pmin, q_start, eos_end, self.wavenumber, label)
fmax = deflagration_root(pmax, q_start, eos_end, self.wavenumber, label)
while fmin * fmax > 0:
pmax *= 2.0
fmax = deflagration_root(pmax, q_start, eos_end, self.wavenumber, label)
p_cjdt = brentq(deflagration_root, pmin, pmax,
args=(q_start, eos_end, self.wavenumber, label))
j2, rho, eps, dp = mass_flux_squared(q_start, p_cjdt, eos_end)
v_detonation, q_unknown = post_discontinuity_state(
p_cjdt, q_start, lr_sign, label, j2, rho,
eps, dp, eos_end)
self.name = r"{\cal CJDT}"
if self.wavenumber == 0:
label = r"\star_L"
self.name += r"_{\leftarrow}"
else:
label = r"\star_R"
self.name += r"_{\rightarrow}"
self.q_end = deepcopy(q_unknown)
self.wavespeed = numpy.array([v_detonation])
def build_inert_wave_section(q_known, unknown_value, wavenumber):
"""
Object factory for the WaveSection; non-reactive case
Parameters
----------
q_known : State
The known state on one side of the wave
unknown_value : scalar
Pressure in the region of unknown state
wavenumber : scalar
Characterises direction of travel of wave
Returns
-------
wavesections : list
List of WaveSections (in this case, one or none)
"""
if wavenumber == 1:
return [Contact(q_known, unknown_value, wavenumber)]
elif q_known.p < unknown_value:
return [Shock(q_known, unknown_value, wavenumber)]
else:
return [Rarefaction(q_known, unknown_value, wavenumber)]
def build_reactive_wave_section(q_known, unknown_value, wavenumber):
"""
Object factory for the WaveSection; reactive case
Parameters
----------
q_known : State
The known state on one side of the wave
unknown_value : scalar
Pressure in the region of unknown state
wavenumber : scalar
Characterises direction of travel of wave
Returns
-------
wavesections : list
List of WaveSections
"""
t_i = q_known.eos['t_ignition'](q_known.rho, q_known.eps)
if wavenumber == 1:
return Contact(q_known, unknown_value, wavenumber)
else:
wavesections = []
if q_known.p < unknown_value:
# The detonation wave
detonation = Detonation(q_known, unknown_value, wavenumber)
wavesections.append(detonation)
q_next = deepcopy(detonation.q_end)
# Finally, was it a CJ detonation?
if q_next.p > unknown_value:
rarefaction = Rarefaction(q_next, unknown_value, wavenumber)
wavesections.append(rarefaction)
else:
t_known = q_known.eos['t_from_rho_eps'](q_known.rho, q_known.eps)
t_i = q_known.eos['t_ignition'](q_known.rho, q_known.eps)
if t_known < t_i: # Need a precursor shock
p_min = unknown_value
p_max = q_known.p
t_min = precursor_root(p_min, q_known, wavenumber)
t_max = precursor_root(p_max, q_known, wavenumber)
assert(t_min < 0)
if t_max <= 0:
p_max *= 2
t_max = precursor_root(p_max, q_known, wavenumber)
p_0_star = brentq(precursor_root, p_min, p_max,
args=(q_known, wavenumber))
precursor_shock = Shock(q_known, p_0_star, wavenumber)
wavesections.append(precursor_shock)
q_next = precursor_shock.q_end
q_next.q = q_known.q # No reaction across inert precursor
q_next.eos = q_known.eos
else: # No precursor shock
q_next = deepcopy(q_known)
# Next, the deflagration wave
deflagration = Deflagration(q_next, unknown_value, wavenumber)
wavesections.append(deflagration)
q_next = deepcopy(deflagration.q_end)
# Finally, was it a CJ deflagration?
if q_next.p > unknown_value:
rarefaction = Rarefaction(q_next, unknown_value, wavenumber)
wavesections.append(rarefaction)
return wavesections
class Wave(object):
"""
A wave is a union of wave sections, separating constant states.
In the inert case each wave contains a single wave section. In the reactive
case the nonlinear waves may contain multiple wave sections. The nonlinear
(left and right, acoustic) waves are rarefactions or discontinuities. The
discontinuities may be shocks, or deflagrations or detonations (reactive
case only).
Parameters
----------
self : Wave
The wave, which has a known state on one side and an unknown
state on the other side.
q_known : State
The known state on one side of the wave
unknown_value : scalar
Pressure in the region of unknown state
wavenumber : scalar
characterises direction of travel of wave
Attributes
----------
wavenumber : int
Indicates if this is the left (0), central (1) or right (2) wave
wavespeed : list of doubles
Speed of the left and right edges of the wave
wave_sections: list of WaveSections
All WaveSections in the Wave (list is empty for a trivial wave)
name : string
LaTeX string giving the name of the wave
q_l : State
State to the left of the Wave
q_r : State
State to the right of the Wave
trivial : boolean
True if the State does not change across the Wave
"""
def __init__(self, q_known, unknown_value, wavenumber):
# NOTE: it's not so clear what wavenumber is - change to something like a wavedirection variable which can be left/right/static?
self.wavenumber = wavenumber
self.wave_sections = []
self.wavespeed = []
if 'q_available' not in q_known.eos:
waves = build_inert_wave_section(q_known, unknown_value,
wavenumber)
for sections in waves:
self.wave_sections.append(sections)
else:
waves = build_reactive_wave_section(q_known, unknown_value,
wavenumber)
for sections in waves:
self.wave_sections.append(sections)
self.name = self.wave_sections_latex_string()
if wavenumber == 0:
self.q_l = deepcopy(q_known)
if self.wave_sections:
self.q_r = self.wave_sections[-1].q_end
else:
self.q_r = deepcopy(self.q_l)
elif wavenumber == 1:
self.q_l = deepcopy(q_known)
self.q_r = deepcopy(q_known)
else:
self.q_r = deepcopy(q_known)
if self.wave_sections:
self.q_l = self.wave_sections[-1].q_end
else:
self.q_l = deepcopy(self.q_r)
minspeed = 10
maxspeed = -10
if self.wave_sections:
for wavesection in self.wave_sections:
for speed in wavesection.wavespeed:
minspeed = min(speed, minspeed)
maxspeed = max(speed, maxspeed)
self.wavespeed.append(minspeed)
if not numpy.allclose(minspeed, maxspeed):
self.wavespeed.append(maxspeed)
self.trivial = True
if self.wave_sections:
for wavesection in self.wave_sections:
if not wavesection.trivial:
self.trivial = False
if self.trivial:
self.wavespeed = []
def plotting_data(self):
r"""
Returns data across the wave for plotting.
Returns
-------
xi_wave : numpy array of double
Characteristic coordinate of data
data_wave : numpy array of double
Data (:math:`\rho, v_x, v_t, \epsilon, p, W, h, c_s`) at each point
"""
xi_wave = numpy.zeros((0,))
data_wave = numpy.zeros((0,8))
for wavesection in self.wave_sections:
xi_section, data_section = wavesection.plotting_data()
xi_wave = numpy.hstack((xi_wave, xi_section))
data_wave = numpy.vstack((data_wave, data_section))
if self.wavenumber == 2:
xi_wave = xi_wave[-1::-1]
data_wave = data_wave[-1::-1,:]
return xi_wave, data_wave
def wave_sections_latex_string(self):
"""
Text description of the WaveSections
Returns
-------
s : string
Description of the type and direction of each WaveSection
"""
names = []
sections = deepcopy(self.wave_sections)
if self.wavenumber == 2:
sections.reverse()
for sec in sections:
if not sec.trivial:
names.append(sec.name)
s = ""
if len(names)==1:
s = names[0]
elif len(names)>1:
s = r"\left("
for n in names:
s += n
s += r"\right) "
return s
def latex_string(self):
"""
Text description of the Wave
Returns
-------
s : string
Description of the Wave; types and direction of WaveSections, plus
speeds.
"""
s = self.wave_sections_latex_string()
speeds = []
sections = deepcopy(self.wave_sections)
if self.wavenumber == 2:
sections.reverse()
for sec in sections:
if not sec.trivial:
for speed in sec.wavespeed:
speeds.append(speed)
if len(speeds) == 0:
return ""
elif len(speeds) == 1:
s += r": \lambda^{{({})}}".format(self.wavenumber)
s += r"= {:.4f}".format(speeds[0])
else:
s += r": \lambda^{{({})}}".format(self.wavenumber)
s += r"\in [{:.4f}, {:.4f}]".format(min(speeds), max(speeds))
return s
def _repr_latex_(self):
s = r"\begin{equation}" + self.latex_string() + r"\end{equation}"
return s
class UnphysicalSolution(Exception):
""" Solution to the problem is unphysical """
pass
| {
"content_hash": "7a1d46acfafe9081bbd50b6bc7590442",
"timestamp": "",
"source": "github",
"line_count": 948,
"max_line_length": 136,
"avg_line_length": 35.151898734177216,
"alnum_prop": 0.5547353258912495,
"repo_name": "harpolea/r3d2",
"id": "ae8c39abd64eacb41655609f66a3377c63cc2f5b",
"size": "33348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "r3d2/wave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2691815"
},
{
"name": "Python",
"bytes": "95428"
},
{
"name": "TeX",
"bytes": "3989"
}
],
"symlink_target": ""
} |
import json
import pytest
HEALTH_PATH = '/management/health'
@pytest.mark.parametrize("https", [True, False])
def test_get_health(client, https):
base_url = '%s://localhost' % ('https' if https else 'http')
# Invalid credentials
response = client.get(
HEALTH_PATH,
base_url=base_url
)
assert response.status_code == 200
data = json.loads(response.get_data(as_text=True))
assert data.get('alive')
| {
"content_hash": "ea09c11afc861fa91c141eb2f9f54032",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 64,
"avg_line_length": 22.3,
"alnum_prop": 0.647982062780269,
"repo_name": "EclecticIQ/OpenTAXII",
"id": "2577e2f2a6c6859a8fecd94fe7ef8fc6055799e9",
"size": "446",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_health.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "237268"
},
{
"name": "Shell",
"bytes": "3174"
}
],
"symlink_target": ""
} |
import agol
import ags
from security import *
from common import *
import _abstract
import web
import manageorg
import manageags
import manageportal
import hostedservice
#import webmap
from geometryservice import *
__version__ = "2.0.100" | {
"content_hash": "a52a10bda971203e409e73e774e5db9a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 29,
"avg_line_length": 18.307692307692307,
"alnum_prop": 0.8067226890756303,
"repo_name": "achapkowski/ArcREST",
"id": "ecd74a438371f9e0b406f2cb1df136bfb1a4c955",
"size": "238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/arcrest/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1234325"
}
],
"symlink_target": ""
} |
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import re
import warnings
# Bokeh imports
from bokeh.util.warnings import BokehDeprecationWarning, BokehUserWarning
from tests.support.util.api import verify_all
from tests.support.util.types import Capture
# Module under test
import bokeh as b # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'__version__',
'license',
'sampledata',
)
_LICENSE = """\
Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
Neither the name of Anaconda nor the names of any contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
"""
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = verify_all(b, ALL)
def test___version___type() -> None:
assert isinstance(b.__version__, str)
def test___version___defined() -> None:
VERSION_PAT = re.compile(r"^(\d+\.\d+\.\d+)((?:\.dev|\.rc).*)?")
assert VERSION_PAT.match(b.__version__.strip(".dirty"))
def test_license(capsys: Capture) -> None:
b.license()
out, err = capsys.readouterr()
assert out == _LICENSE
class TestWarnings:
@pytest.mark.parametrize('cat', (BokehDeprecationWarning, BokehUserWarning))
def test_bokeh_custom(self, cat) -> None:
r = warnings.formatwarning("message", cat, "line", "lineno")
assert r == "%s: %s\n" %(cat.__name__, "message")
def test_general_default(self) -> None:
r = warnings.formatwarning("message", RuntimeWarning, "line", "lineno")
assert r == "line:lineno: RuntimeWarning: message\n"
# TODO (bev) issue with this one test and 3.9 support PR
@pytest.mark.skip
def test_filters(self) -> None:
assert ('always', None, BokehUserWarning, None, 0) in warnings.filters
assert ('always', None, BokehDeprecationWarning, None, 0) in warnings.filters
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "ba3e6f23acfc39ad00d5f73597dcd6a8",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 85,
"avg_line_length": 38.41121495327103,
"alnum_prop": 0.5630170316301704,
"repo_name": "bokeh/bokeh",
"id": "913899e8c201e399bcb73202df63a3bc2ae14a94",
"size": "4614",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "tests/unit/bokeh/test___init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
} |
"""
core
~~~~
Core functionality shared between the extension and the decorator.
:copyright: (c) 2022 by Ashley Sommer (based on flask-cors by Cory Dolphin).
:license: MIT, see LICENSE for more details.
"""
import re
import logging
import collections
from datetime import timedelta
from typing import Dict
try:
# Sanic compat Header from Sanic v19.9.0 and above
from sanic.compat import Header as CIMultiDict
except ImportError:
try:
# Sanic server CIMultiDict from Sanic v0.8.0 and above
from sanic.server import CIMultiDict
except ImportError:
raise RuntimeError("Your version of sanic does not support "
"CIMultiDict")
LOG = logging.getLogger(__name__)
# Response Headers
ACL_ORIGIN = 'Access-Control-Allow-Origin'
ACL_METHODS = 'Access-Control-Allow-Methods'
ACL_ALLOW_HEADERS = 'Access-Control-Allow-Headers'
ACL_EXPOSE_HEADERS = 'Access-Control-Expose-Headers'
ACL_CREDENTIALS = 'Access-Control-Allow-Credentials'
ACL_MAX_AGE = 'Access-Control-Max-Age'
# Request Header
ACL_REQUEST_METHOD = 'Access-Control-Request-Method'
ACL_REQUEST_HEADERS = 'Access-Control-Request-Headers'
ALL_METHODS = ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE']
CONFIG_OPTIONS = ['CORS_ORIGINS', 'CORS_METHODS', 'CORS_ALLOW_HEADERS',
'CORS_EXPOSE_HEADERS', 'CORS_SUPPORTS_CREDENTIALS',
'CORS_MAX_AGE', 'CORS_SEND_WILDCARD',
'CORS_AUTOMATIC_OPTIONS', 'CORS_VARY_HEADER',
'CORS_RESOURCES', 'CORS_INTERCEPT_EXCEPTIONS',
'CORS_ALWAYS_SEND']
# Attribute added to request object by decorator to indicate that CORS
# was evaluated, in case the decorator and extension are both applied
# to a view.
# TODO: Refactor these two flags down into one flag.
SANIC_CORS_EVALUATED = '_sanic_cors_e'
SANIC_CORS_SKIP_RESPONSE_MIDDLEWARE = "_sanic_cors_srm"
# Strange, but this gets the type of a compiled regex, which is otherwise not
# exposed in a public API.
RegexObject = type(re.compile(''))
DEFAULT_OPTIONS = dict(origins='*',
methods=ALL_METHODS,
allow_headers='*',
expose_headers=None,
supports_credentials=False,
max_age=None,
send_wildcard=False,
automatic_options=True,
vary_header=True,
resources=r'/*',
intercept_exceptions=True,
always_send=True)
def parse_resources(resources):
if isinstance(resources, dict):
# To make the API more consistent with the decorator, allow a
# resource of '*', which is not actually a valid regexp.
resources = [(re_fix(k), v) for k, v in resources.items()]
# Sort by regex length to provide consistency of matching and
# to provide a proxy for specificity of match. E.G. longer
# regular expressions are tried first.
def pattern_length(pair):
maybe_regex, _ = pair
return len(get_regexp_pattern(maybe_regex))
return sorted(resources,
key=pattern_length,
reverse=True)
elif isinstance(resources, str):
return [(re_fix(resources), {})]
elif isinstance(resources, collections.abc.Iterable):
return [(re_fix(r), {}) for r in resources]
# Type of compiled regex is not part of the public API. Test for this
# at runtime.
elif isinstance(resources, RegexObject):
return [(re_fix(resources), {})]
else:
raise ValueError("Unexpected value for resources argument.")
def get_regexp_pattern(regexp):
"""
Helper that returns regexp pattern from given value.
:param regexp: regular expression to stringify
:type regexp: _sre.SRE_Pattern or str
:returns: string representation of given regexp pattern
:rtype: str
"""
try:
return regexp.pattern
except AttributeError:
return str(regexp)
def get_cors_origins(options, request_origin):
origins = options.get('origins')
wildcard = r'.*' in origins
# If the Origin header is not present terminate this set of steps.
# The request is outside the scope of this specification.-- W3Spec
if request_origin:
LOG.debug("CORS request received with 'Origin' %s", request_origin)
# If the allowed origins is an asterisk or 'wildcard', always match
if wildcard and options.get('send_wildcard'):
LOG.debug("Allowed origins are set to '*'. Sending wildcard CORS header.")
return ['*']
# If the value of the Origin header is a case-sensitive match
# for any of the values in list of origins
elif try_match_any(request_origin, origins):
LOG.debug("The request's Origin header matches. Sending CORS headers.", )
# Add a single Access-Control-Allow-Origin header, with either
# the value of the Origin header or the string "*" as value.
# -- W3Spec
return [request_origin]
else:
LOG.debug("The request's Origin header does not match any of allowed origins.")
return None
elif options.get('always_send'):
if wildcard:
# If wildcard is in the origins, even if 'send_wildcard' is False,
# simply send the wildcard. It is the most-likely to be correct
# thing to do (the only other option is to return nothing, which)
# pretty is probably not whawt you want if you specify origins as
# '*'
return ['*']
else:
# Return all origins that are not regexes.
return sorted([o for o in origins if not probably_regex(o)])
# Terminate these steps, return the original request untouched.
else:
LOG.debug("The request did not contain an 'Origin' header. "
"This means the browser or client did not request CORS, ensure the Origin Header is set.")
return None
def get_allow_headers(options, acl_request_headers):
if acl_request_headers:
request_headers = [h.strip() for h in acl_request_headers.split(',')]
# any header that matches in the allow_headers
matching_headers = filter(
lambda h: try_match_any(h, options.get('allow_headers')),
request_headers
)
return ', '.join(sorted(matching_headers))
return None
def get_cors_headers(options: Dict, request_headers: CIMultiDict, request_method):
found_origins_list = request_headers.getall('Origin', None)
found_origins = ", ".join(found_origins_list) if found_origins_list else None
origins_to_set = get_cors_origins(options, found_origins)
if not origins_to_set: # CORS is not enabled for this route
return CIMultiDict()
# This is a regular dict here, it gets converted to a CIMultiDict at the bottom of this function.
headers = {}
for origin in origins_to_set:
# TODO, with CIDict, with will only allow one origin
# With CIMultiDict it should work with multiple
headers[ACL_ORIGIN] = origin
headers[ACL_EXPOSE_HEADERS] = options.get('expose_headers')
if options.get('supports_credentials'):
headers[ACL_CREDENTIALS] = 'true' # case sensative
# This is a preflight request
# http://www.w3.org/TR/cors/#resource-preflight-requests
if request_method == 'OPTIONS':
acl_request_method = request_headers.get(ACL_REQUEST_METHOD, '').upper()
# If there is no Access-Control-Request-Method header or if parsing
# failed, do not set any additional headers
if acl_request_method and acl_request_method in options.get('methods'):
# If method is not a case-sensitive match for any of the values in
# list of methods do not set any additional headers and terminate
# this set of steps.
acl_request_headers_list = request_headers.getall(ACL_REQUEST_HEADERS, None)
acl_request_headers = ", ".join(acl_request_headers_list) if acl_request_headers_list else None
headers[ACL_ALLOW_HEADERS] = get_allow_headers(options, acl_request_headers)
headers[ACL_MAX_AGE] = str(options.get('max_age')) # sanic cannot handle integers in header values.
headers[ACL_METHODS] = options.get('methods')
else:
LOG.info("The request's Access-Control-Request-Method header does not match allowed methods. "
"CORS headers will not be applied.")
# http://www.w3.org/TR/cors/#resource-implementation
if options.get('vary_header'):
# Only set header if the origin returned will vary dynamically,
# i.e. if we are not returning an asterisk, and there are multiple
# origins that can be matched.
if headers[ACL_ORIGIN] == '*':
pass
elif (len(options.get('origins')) > 1 or
len(origins_to_set) > 1 or
any(map(probably_regex, options.get('origins')))):
headers['Vary'] = "Origin"
return CIMultiDict((k, v) for k, v in headers.items() if v)
def set_cors_headers(req, resp, req_context, options):
"""
Performs the actual evaluation of Sanic-CORS options and actually
modifies the response object.
This function is used in the decorator, the CORS exception wrapper,
and the after_request callback
:param sanic.request.Request req:
"""
# If CORS has already been evaluated via the decorator, skip
if req_context is not None:
evaluated = getattr(req_context, SANIC_CORS_EVALUATED, False)
if evaluated:
LOG.debug('CORS have been already evaluated, skipping')
return resp
# `resp` can be None or [] in the case of using Websockets
# however this case should have been handled in the `extension` and `decorator` methods
# before getting here. This is a final failsafe check to prevent crashing
if not resp:
return None
if resp.headers is None:
resp.headers = CIMultiDict()
headers_to_set = get_cors_headers(options, req.headers, req.method)
LOG.debug('Settings CORS headers: %s', str(headers_to_set))
for k, v in headers_to_set.items():
# Special case for "Vary" header, we should append it to a comma separated list
if (k == "vary" or k == "Vary") and "vary" in resp.headers:
vary_list = resp.headers.popall("vary")
vary_list.append(v)
new_vary = ", ".join(vary_list)
try:
resp.headers.add('Vary', new_vary)
except Exception:
resp.headers['Vary'] = new_vary
else:
try:
resp.headers.add(k, v)
except Exception:
resp.headers[k] = v
return resp
def probably_regex(maybe_regex):
if isinstance(maybe_regex, RegexObject):
return True
else:
common_regex_chars = ['*', '\\',']', '?']
# Use common characters used in regular expressions as a proxy
# for if this string is in fact a regex.
return any((c in maybe_regex for c in common_regex_chars))
def re_fix(reg):
"""
Replace the invalid regex r'*' with the valid, wildcard regex r'/.*' to
enable the CORS app extension to have a more user friendly api.
"""
return r'.*' if reg == r'*' else reg
def try_match_any(inst, patterns):
return any(try_match(inst, pattern) for pattern in patterns)
def try_match(request_origin, maybe_regex):
"""Safely attempts to match a pattern or string to a request origin."""
if isinstance(maybe_regex, RegexObject):
return re.match(maybe_regex, request_origin)
elif probably_regex(maybe_regex):
return re.match(maybe_regex, request_origin, flags=re.IGNORECASE)
else:
try:
return request_origin.lower() == maybe_regex.lower()
except AttributeError:
return request_origin == maybe_regex
def get_cors_options(appInstance, *dicts):
"""
Compute CORS options for an application by combining the DEFAULT_OPTIONS,
the app's configuration-specified options and any dictionaries passed. The
last specified option wins.
"""
options = DEFAULT_OPTIONS.copy()
options.update(get_app_kwarg_dict(appInstance))
if dicts:
for d in dicts:
options.update(d)
return serialize_options(options)
def get_app_kwarg_dict(appInstance):
"""Returns the dictionary of CORS specific app configurations."""
# In order to support blueprints which do not have a config attribute
app_config = getattr(appInstance, 'config', {})
return dict(
(k.lower().replace('cors_', ''), app_config.get(k))
for k in CONFIG_OPTIONS
if app_config.get(k) is not None
)
def flexible_str(obj):
"""
A more flexible str function which intelligently handles stringifying
strings, lists and other iterables. The results are lexographically sorted
to ensure generated responses are consistent when iterables such as Set
are used.
"""
if obj is None:
return None
elif(not isinstance(obj, str)
and isinstance(obj, collections.abc.Iterable)):
return ', '.join(str(item) for item in sorted(obj))
else:
return str(obj)
def serialize_option(options_dict, key, upper=False):
if key in options_dict:
value = flexible_str(options_dict[key])
options_dict[key] = value.upper() if upper else value
def ensure_iterable(inst):
"""
Wraps scalars or string types as a list, or returns the iterable instance.
"""
if isinstance(inst, str):
return [inst]
elif not isinstance(inst, collections.abc.Iterable):
return [inst]
else:
return inst
def sanitize_regex_param(param):
return [re_fix(x) for x in ensure_iterable(param)]
def serialize_options(opts):
"""
A helper method to serialize and processes the options dictionary.
"""
options = (opts or {}).copy()
for key in opts.keys():
if key not in DEFAULT_OPTIONS:
LOG.warning("Unknown option passed to Sanic-CORS: %s", key)
# Ensure origins is a list of allowed origins with at least one entry.
options['origins'] = sanitize_regex_param(options.get('origins'))
options['allow_headers'] = sanitize_regex_param(options.get('allow_headers'))
# This is expressly forbidden by the spec. Raise a value error so people
# don't get burned in production.
if r'.*' in options['origins'] and options['supports_credentials'] and options['send_wildcard']:
raise ValueError("Cannot use supports_credentials in conjunction with"
"an origin string of '*'. See: "
"http://www.w3.org/TR/cors/#resource-requests")
serialize_option(options, 'expose_headers')
serialize_option(options, 'methods', upper=True)
if isinstance(options.get('max_age'), timedelta):
options['max_age'] = str(int(options['max_age'].total_seconds()))
return options
| {
"content_hash": "201fcd88e31dc10dd31c5261c8356cc1",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 112,
"avg_line_length": 37.57881773399015,
"alnum_prop": 0.6366258111031002,
"repo_name": "ashleysommer/sanic-cors",
"id": "fbe53d781d4cbe9f30650306188d9ed9580166a8",
"size": "15281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sanic_cors/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113058"
}
],
"symlink_target": ""
} |
import furnitureList as fl
import random
import warnArea
import constants
from vector import *
def addPlacedFurniture(placedFurniture, furniture, warnAreas):
placedFurniture.append(furniture)
#Add a warn area for the furniture
warnArea.addWarnArea( warnAreas,
(
fl.getCorner1(furniture),
fl.getCorner2(furniture)
),
warnArea.getWarnLevelForFurniture(fl.getType(furniture))
)
def placeFurniture(placedFurniture, availableFurniture, warnAreas):
for i in warnAreas:
print(i)
freeSpace = fl.getFreeSpace(constants.WARNING_HARD, warnAreas)
print("Free space", freeSpace);
bruteForce(placedFurniture, availableFurniture, warnAreas)
def canPlaceCouch(span, warnAreas):
#Check the coordinates
if span[0].x == span[1].x:
if span[0].x == 500:
span[0].x = span[1].x = 0
else:
span[0].x = span[1].x = 500
for i in range(span[0].y, span[1].y + 1):
if warnArea.getWarnLevel(Vector2(span[0].x, i), warnAreas):
return False
else:
if span[0].y == 500:
span[0].y = span[1].y = 0
else:
span[0].y = span[1].y = 500
for i in range(span[0].x, span[1].x + 1):
if warnArea.getWarnLevel(Vector2(i, span[0].y), warnAreas):
return False
return True
def assessScore(furniture, warnAreas):
freeSpaces = fl.getFreeSpace(constants.WARNING_HARD, warnAreas)
if furniture == "bed":
return assessBedScore(freeSpaces)
# elif getType(furniture) == "couch":
# return assessCouchScore(freeSpaces)
elif furniture == "desk":
return assessDeskScore(freeSpaces)
# elif getType(furniture) == "chair":
# return assessChairScore(freeSpaces)
elif furniture == "tv":
return assessTVScore(freeSpaces, warnAreas)
elif furniture == "table":
return assessTableScore(freeSpaces)
# elif getType(furniture) == "rug":
# return assessRugScore(freeSpaces)
elif furniture == "shelf":
return assessShelfScore(freeSpaces)
# Functions for assesing the scores of different pieces of
# furniture. TODO THIS MIGHT NOT WORK WITH THE TV AND COUCH GROUP
#def assessBedScore(freeSpaces):
# spacesWithScore = []
# for space in freeSpaces:
# score = 100
# v1 = space[0]
# v2 = space[1]
# distance = get_distance(v1, v2)
# score *= 1/distance
# if space[0].y != ROOM_WIDTH: #if we are
#
# corner1 = space[0]
# corner2 = space[0].
# if isFree()
# score = 0
# spacesWithScore.append(space + [score])
#
# return spacesWithScore
def bruteForce(placedFurniture, availableFurniture, warnAreas):
print("avail is ", availableFurniture)
for furniture in availableFurniture:
if furniture == "chair":
continue
maxIt = 100 # maximum number of tests
numberOfItems = availableFurniture[furniture]
while maxIt and numberOfItems:
maxIt -= 1
numchairs = availableFurniture["chair"]
if furniture == "desk" and numchairs:
fW = constants.FURNITURE_SIZES[furniture][0] + constants.CHAIR_SIZE[0]
else:
fW = constants.FURNITURE_SIZES[furniture][0]
fH = constants.FURNITURE_SIZES[furniture][1]
randx = random.randint(0, constants.ROOM_WIDTH - fW)
randy = random.randint(0, constants.ROOM_WIDTH - fH)
v1 = Vector2(randx, randy)
v2 = Vector2(randx + fW, randy + fH)
if warnArea.isFree(v1, v2, warnAreas):
if furniture == "desk" and numchairs:
availableFurniture["chair"] -= 1
chairOffset = Vector2(0, constants.CHAIR_SIZE[0])
addPlacedFurniture(placedFurniture, \
createFurniture(v1 + chairOffset, v2 + chairOffset, "chair"), warnAreas)
numberOfItems -= 1
addPlacedFurniture(placedFurniture, \
createFurniture(v1, v2, furniture), warnAreas)
def createFurniture(vec1, vec2, type_):
return (vec1.x, vec1.y, vec2.x, vec2.y, type_)
# TODO probably not applicable
def assessCouchScore(freeSpaces):
pass
def assessDeskScore(freeSpaces):
pass
# TODO probably not applicable
def assessChairScore(freeSpaces):
pass
def assessTVScore(freeSpaces, warnAreas):
spacesWithScore = []
for space in freeSpaces:
score = 100
v1 = space[0]
v2 = space[1]
distance = get_distance(v1, v2)
if distance < constants.TV_SIZE[1]: # if the space does not fit tv...
spacesWithScore.append(space + [0]) # score is 0
continue
score *= 1/distance
if not canPlaceCouch(space, warnAreas):
score = 0
spacesWithScore.append(space + [score])
return spacesWithScore
# TODO probably not applicable
def assessTableScore(freeSpaces):
pass
# TODO probably not applicable
def assessRugScore(freeSpaces):
pass
def assessShelfScore(freeSpaces):
pass
def placeFurnitureInSpan(furnitureName, span, placedFurniture, warnAreas):
furnitureSize = constants.FURNITURE_SIZES[furnitureName];
print("Span: ", span);
width = furnitureSize[0]
height = furnitureSize[1];
pos0 = Vector2(0,0)
pos1 = Vector2(0,0)
#Calculating the direction of the furniture
if span[0].y == span[1].y:
middle = span[0].x + (span[1].x - span[0].x) / 2;
if(span[0].y == 0):
pos0.x = middle - width / 2;
pos0.y = span[0].y;
pos1.x = middle + width / 2;
pos1.y = span[0].y + height
else:
pos0.x = middle + width / 2;
pos0.y = span[0].y;
pos1.x = middle - width / 2;
pos1.y = span[0].y - height
else:
middle = span[0].y + (span[1].y - span[0].y) / 2;
if(span[0].x == 0):
pos0.x = span[0].x
pos0.y = middle + width / 2
pos1.x = span[0].x + height
pos1.y = middle - width / 2
else:
pos0.x = span[0].x
pos0.y = middle - width / 2
pos1.x = span[0].x - height
pos1.y = middle + width
addPlacedFurniture(placedFurniture, (pos0.x, pos0.y, pos1.x, pos1.y, furnitureName), warnAreas);
###################################################################
# Help functions
###################################################################
###################################################################
| {
"content_hash": "b8f131bded08b534c23d7035a6ea74c9",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 111,
"avg_line_length": 31.427906976744186,
"alnum_prop": 0.5680035518721326,
"repo_name": "TheZoq2/VRHack",
"id": "83ea964fc9d5e4ab59a2491e5fd69e1903f912cb",
"size": "6757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/placement.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "665"
},
{
"name": "HTML",
"bytes": "9084"
},
{
"name": "JavaScript",
"bytes": "487721"
},
{
"name": "Python",
"bytes": "23993"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
} |
""" Title: Ch3LpfVarIt - Chapter 3: Low-pass filter: Resulution vs. Iterations
Author: Ricardo Alejos
Date: 2016-09-20
Description: Runs optimizations over the LPF varying its resolution and the
Simulated Annealing's iterations.
Version: 1.0.0
Comments: -
"""
# Import Python's built-in modules
import csv as _csv
import logging as _logging
import os as _os
import sys as _sys
import time as _time
# Add project root directory to sys.path so other modules can be imported
_projectRoot = _os.path.abspath(__file__ + "\\..\\..\\..")
if _projectRoot not in _sys.path:
_sys.path.insert(0, _projectRoot)
_strThisFileName = _os.path.splitext(_os.path.basename(__file__))[0]
import pkg.Algorithm.SimAnnMin as _sam
import pkg.ObjectiveFunctions.MsLpf as _lpf
def _initLogger():
global logger
logger = _logging.getLogger(_strThisFileName)
logger.setLevel(_logging.DEBUG)
map(logger.removeHandler, logger.handlers[:])
ch = _logging.StreamHandler(_sys.stdout)
ch.setLevel(_logging.INFO)
fh = _logging.FileHandler(_strThisFileName + ".log")
fh.setLevel(_logging.DEBUG)
formatter = _logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
logger.debug("A new logger session has started.")
_initLogger()
def main():
with open(_strThisFileName + "_" + _time.strftime('%Y%m%d%H%M%S') + ".csv", "wb") as fhReport:
lRptFld = [
"SaCfg",
"SampleNum",
"u*",
"x*_0",
"x*_1",
"x*_2",
]
cwReport = _csv.DictWriter(fhReport, lRptFld)
cwReport.writeheader()
lstSaCfg = ["TTT", "STT", "FTT", "TST", "TFT", "TTS", "TTF"]
numItn = 50
dicTmeFun = dict(
T = _sam.TmeFns.typical(numItn),
F = _sam.TmeFns.fast(numItn),
S = _sam.TmeFns.slow(numItn)
)
dicSseFun = dict(
T = _sam.SseFns.typical,
F = _sam.SseFns.fast,
S = _sam.SseFns.slow
)
dicAceFun = dict(
T = _sam.AceFns.typical,
F = _sam.AceFns.fast,
S = _sam.AceFns.slow
)
numSmpPerCfg = 10
u = _lpf.getInterfaceFunction(2)
for strSaCfg in lstSaCfg:
for numSample in range(numSmpPerCfg):
logger.info("Running SAM using the %s configuration."%strSaCfg)
dReportRow = dict((key, None) for key in lRptFld)
dReportRow["SaCfg"] = strSaCfg
dReportRow["SampleNum"] = numSample
SamObj = _sam.SimAnnMin()
SamObj.setObeFun(u)
SamObj.setTmeLst(dicTmeFun[strSaCfg[0]])
SamObj.setSseFun(dicSseFun[strSaCfg[1]])
SamObj.setAceFun(dicAceFun[strSaCfg[2]])
SamObj.setX0([-0.7,0.5,0.1])
SamObj.runAll()
nUo = SamObj.getUo()
logger.info("Got u* = %0.4f"%nUo)
dReportRow["u*"] = "%0.4f"%nUo
lXo = SamObj.getXo()
sXo = "[ " + " ".join(["%0.4f"%x for x in lXo]) + " ]"
logger.info("Got x* = %s"%sXo)
dReportRow["x*_0"] = "%0.4f"%lXo[0]
dReportRow["x*_1"] = "%0.4f"%lXo[1]
dReportRow["x*_2"] = "%0.4f"%lXo[2]
cwReport.writerow(dReportRow)
main() | {
"content_hash": "b48bbcc33bb7deb2f5983f67ede072ea",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 98,
"avg_line_length": 35.656565656565654,
"alnum_prop": 0.5487252124645893,
"repo_name": "ricardoalejos/RalejosMsrElcDsn",
"id": "8192248725fbb8fdba60bc7fe39ba122d01d8634",
"size": "3530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SmdAngPtnSnt/pkg/ExpFlows/Ch3SaCalibration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49325"
}
],
"symlink_target": ""
} |
"""
A simple renderer for HTML to plain text. It knows about <p>, <div>,
and <blockquote>; all other markup is ignored.
Paragraphs are collected and wrapped. Blockquotes are indented.
Paragraphs cannot be nested at this time. HTML entities are
substituted. Usually this should only be used with markup that is
intended to be used with this renderer (e.g., ZPTKit.emailtemplate).
The render() function is the easiest way to use this.
"""
from HTMLParser import HTMLParser
try:
import textwrap
except:
# Was added in Python 2.3
from backports import textwrap
import re
import htmlentitydefs
DEFAULT_ENCODING = 'utf8'
def render(text, width=70):
context = Context()
context.width = width
context.indent = 0
p = HTMLRenderer()
p.feed(text)
p.close()
paras = [encode_unicode(para.to_text(context))
for para in p.paragraphs
if para]
return ''.join(paras)
def encode_unicode(s, encoding=None):
if isinstance(s, unicode):
return s.encode(encoding or DEFAULT_ENCODING)
return s
class HTMLRenderer(HTMLParser):
block_tags = 'p div blockquote h1 h2 h3 h4 h5 h6 ul ol'.split()
def reset(self):
HTMLParser.reset(self)
self.paragraphs = []
self.in_paragraph = None
self.last_href = None
self.href_content = None
self.in_table = None
self.cell_content = None
self.list_type = []
def handle_starttag(self, tag, attrs):
tag = tag.lower()
if tag == 'body':
self.paragraphs = []
self.in_paragraph = None
if tag == 'blockquote':
self.paragraphs.append(Indenter(4))
if tag in self.block_tags:
self.start_para(tag, attrs)
if tag == 'br':
self.add_br(tag, attrs)
if tag == 'a':
self.last_href = self.get_attr(attrs, 'href')
self.href_content = []
if tag == 'img':
alt = self.get_attr(attrs, 'alt')
if alt:
self.handle_data(alt)
if tag == 'table':
# @@: This is a hacky way of dealing with nested
# tables. Basically the tables are crudely flattened,
# and we keep track of how many <table>'s we see based
# on the table.depth attribute (so we can later remove
# the table when sufficient </table>'s have been seen)
if self.in_table:
self.in_table.depth += 1
else:
self.in_table = Table()
if tag == 'tr':
self.in_table.add_row()
if tag == 'td':
self.cell_content = []
if tag == 'ul':
self.paragraphs.append(Indenter(2))
self.list_type.append('ul')
if tag == 'ol':
self.paragraphs.append(Indenter(2))
self.list_type.append(1)
if tag == 'li':
self.add_br(None, None)
if not self.list_type or self.list_type[-1] == 'ul':
self.handle_data('* ')
else:
self.handle_data('%i) ' % self.list_type[-1])
self.list_type[-1] += 1
def handle_endtag(self, tag):
if tag in self.block_tags:
self.end_para(tag)
if tag == 'a':
if self.href_content:
content = ''.join(self.href_content)
else:
content = None
self.href_content = None
if content and self.last_href and content != self.last_href:
self.handle_data(' <%s>' % self.last_href)
self.last_href = None
if tag == 'table':
self.paragraphs.append(self.in_table)
self.in_table.depth -= 1
if not self.in_table.depth:
self.in_table = None
if tag == 'td':
self.end_para(tag)
if self.paragraphs:
self.in_table.add_cell(self.paragraphs[-1])
self.paragraphs.pop()
if tag == 'ul' or tag == 'ol':
self.paragraphs.append(Indenter(-2))
self.list_type.pop()
if tag == 'blockquote':
self.paragraphs.append(Indenter(-4))
def handle_data(self, data):
if self.in_paragraph is None:
self.start_para(None, None)
self.in_paragraph.add_text(data)
if self.href_content is not None:
self.href_content.append(data)
def handle_entityref(self, name):
name = name.lower()
if name not in htmlentitydefs.entitydefs:
# bad entity, just let it through
# (like a &var=value in a URL)
self.handle_data('&'+name)
return
result = htmlentitydefs.entitydefs[name]
if result.startswith('&'):
self.handle_charref(result[2:-1])
else:
self.handle_data(result)
def handle_charref(self, name):
try:
self.handle_data(unichr(int(name)))
except ValueError:
self.handle_data('&' + name)
def start_para(self, tag, attrs):
if tag is None:
# Implicit paragraph
tag = 'p'
attrs = []
self.end_para(None)
self.in_paragraph = Paragraph(tag, attrs)
def end_para(self, tag):
if self.in_paragraph:
self.paragraphs.append(self.in_paragraph)
self.in_paragraph = None
def add_br(self, tag, attrs):
if not self.in_paragraph:
self.start_para(None, None)
self.in_paragraph.add_tag('<br>')
def close(self):
HTMLParser.close(self)
self.end_para(None)
def get_attr(self, attrs, name, default=None):
for attr_name, value in attrs:
if attr_name.lower() == name.lower():
return value
return default
class Paragraph:
def __init__(self, tag, attrs):
self.tag = tag
self.attrs = attrs
self.text = []
self._default_align = 'left'
def __repr__(self):
length = len(''.join(map(str, self.text)))
attrs = ' '.join([self.tag] +
['%s="%s"' % (name, value)
for name, value in self.attrs] +
['length=%i' % length])
return '<Paragraph %s: %s>' % (hex(id(self))[2:], attrs)
def add_text(self, text):
self.text.append(text)
def add_tag(self, tag):
self.text.append([tag])
def to_text(self, context):
lines = self.make_lines()
width = context.width
indent = context.indent
wrapped_lines = []
for line in lines:
wrapped = textwrap.wrap(
line,
width,
replace_whitespace=True,
initial_indent=' '*indent,
subsequent_indent=' '*indent,
fix_sentence_endings=False,
break_long_words=False)
wrapped_lines.extend(wrapped)
if self.tag in ('h1', 'h2'):
self._default_align = 'center'
lines = self.align_lines(wrapped_lines, width)
text = '\n'.join(lines)
if self.tag in ('h1', 'h3'):
text = text.upper()
if self.tag == 'h4':
text = '*%s*' % text
return text + '\n\n'
def align_lines(self, lines, width):
if self.alignment() == 'right':
return [' '*(width-len(line))+line
for line in lines]
elif self.alignment() == 'center':
return [' '*((width-len(line))/2)+line
for line in lines]
elif self.alignment() == 'left':
return lines
else:
# Could be odd things like 'baseline'; treat it as normal
return lines
def make_lines(self):
lines = ['']
for data in self.text:
if isinstance(data, list):
tag = data[0]
if tag == '<br>':
lines.append('')
else:
assert 0, "Unknown tag: %r" % tag
else:
lines[-1] = lines[-1] + data
return [normalize(line).strip()
for line in lines
if line]
def alignment(self):
for name, value in self.attrs:
if name.lower() == 'align':
return value.lower()
return self._default_align
def __nonzero__(self):
for t in self.text:
if t:
return True
return False
class Table:
def __init__(self):
self.rows = []
self.row_num = 0
self.depth = 1
def add_row(self):
self.row_num += 1
self.rows.append([])
def add_cell(self, value):
self.rows[-1].append(value)
def __nonzero__(self):
return not not self.rows
def to_text(self, context):
if self.rows and not self.rows[-1]:
# Get rid of blank last line
self.rows.pop()
if not self.rows:
return ''
headers = [p.to_text(context).strip() for p in self.rows.pop(0)]
context.indent += 4
lines = []
for row in self.rows:
for header, cell in zip(headers, row):
cell_text = cell.to_text(context).strip()
lines.append('%s: %s' % (header, cell_text))
lines.append('')
context.indent -= 4
return '\n'.join(lines) + '\n\n'
class Indenter:
def __init__(self, indent):
self.indent = indent
def to_text(self, context):
context.indent += self.indent
return ''
class Context:
pass
def normalize(text):
text = re.sub(r'\s+', ' ', text)
# nbsp:
if not isinstance(text, unicode):
text = text.replace('\xa0', ' ')
return text
if __name__ == '__main__':
import sys
args = sys.argv[1:]
if not args:
input = sys.stdin.read()
else:
input = open(args[0]).read()
print render(input)
| {
"content_hash": "c72ca18ce2f1139eb7708c81225e9baa",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 72,
"avg_line_length": 30.415151515151514,
"alnum_prop": 0.518581249377304,
"repo_name": "kevinrenskers/django-generic-mail",
"id": "7cd2fdb92d5b1e2740b8337cf432b55e2e89d852",
"size": "11311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generic_mail/htmlrenderer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20377"
}
],
"symlink_target": ""
} |
"""Sample integration test module."""
# pylint: disable=no-self-use,missing-docstring
# from cf_predict import create_app
| {
"content_hash": "6e0c906136b8875cee2485d105b39fa0",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 47,
"avg_line_length": 30.75,
"alnum_prop": 0.7560975609756098,
"repo_name": "ronert/cf-predict",
"id": "28521fdc7a4fdc946687837b1c0ccb6ed445d099",
"size": "123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "9298"
},
{
"name": "Python",
"bytes": "15164"
},
{
"name": "Shell",
"bytes": "197"
}
],
"symlink_target": ""
} |
import sys
import io
import paramiko
import os
import re
from config import (ConfigSectionMap)
my_ssh_client = None
def open_ssh_client():
ssh_host = ConfigSectionMap("Backup")['sshhost']
ssh_port = ConfigSectionMap("Backup")['sshport']
ssh_username = ConfigSectionMap("Backup")['sshusername']
print "SSH Host: %s" % (ssh_host)
print "SSH Port: %s" % (ssh_port)
print "SSH Username: %s" % (ssh_username)
ssh_client=paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=ssh_host,username=ssh_username)
print "Connected!"
return ssh_client
# Transfers the file "local_flo" over ssh/sftp to the configured remote server.
# local_flo can be either a string specifying the file path, or a file-like object (stream).
# Note that if a stream is supplied, the method also needs the file size to be specified,
# via the parameter byte_size.
def transfer_file(local_flo, dataset_authority, dataset_identifier, storage_identifier, byte_size):
sftp_client=my_ssh_client.open_sftp()
remote_dir = dataset_authority + "/" + dataset_identifier
subdirs = remote_dir.split("/")
cdir = ConfigSectionMap("Backup")['backupdirectory'] + "/"
for subdir in subdirs:
try:
cdir = cdir + subdir + "/"
sftpattr=sftp_client.stat(cdir)
except IOError:
#print "directory "+cdir+" does not exist (creating)"
sftp_client.mkdir(cdir)
#else:
# print "directory "+cdir+" already exists"
m = re.search('^([a-z0-9]*)://(.*)$', storage_identifier)
if m is not None:
storageTag = m.group(1)
storage_identifier = re.sub('^.*:', '', storage_identifier)
remote_file = cdir + storage_identifier
if (type(local_flo) is str):
sftp_client.put(local_flo,remote_file)
else:
# assume it's a stream:
# sftp_client.putfo() is convenient, but appears to be unavailable in older
# versions of paramiko; so we'll be using .read() and .write() instead:
#sftp_client.putfo(local_flo,remote_file,byte_size)
sftp_stream = sftp_client.open(remote_file,"wb")
while True:
buffer = local_flo.read(32*1024)
if len(buffer) == 0:
break;
sftp_stream.write (buffer)
sftp_stream.close()
sftp_client.close()
print "File transfered."
return remote_file
def verify_remote_file(remote_file, checksum_type, checksum_value):
try:
stdin,stdout,stderr=my_ssh_client.exec_command("ls "+remote_file)
remote_file_checked = stdout.readlines()[0].rstrip("\n\r")
except:
raise ValueError("remote file check failed (" + remote_file + ")")
if (remote_file != remote_file_checked):
raise ValueError("remote file NOT FOUND! (" + remote_file_checked + ")")
if (checksum_type == "MD5"):
remote_command = "md5sum"
elif (checksum_type == "SHA1"):
remote_command = "sha1sum"
try:
stdin,stdout,stderr=my_ssh_client.exec_command(remote_command+" "+remote_file)
remote_checksum_value = (stdout.readlines()[0]).split(" ")[0]
except:
raise ValueError("remote checksum check failed (" + remote_file + ")")
if (checksum_value != remote_checksum_value):
raise ValueError("remote checksum BAD! (" + remote_checksum_value + ")")
def backup_file_ssh(file_input, dataset_authority, dataset_identifier, storage_identifier, checksum_type, checksum_value, byte_size=0):
global my_ssh_client
if (my_ssh_client is None):
my_ssh_client = open_ssh_client()
print "ssh client is not defined"
else:
print "reusing the existing ssh client"
try:
file_transfered = transfer_file(file_input, dataset_authority, dataset_identifier, storage_identifier, byte_size)
except:
raise ValueError("failed to transfer file")
verify_remote_file(file_transfered, checksum_type, checksum_value)
def main():
print "entering ssh (standalone mode)"
print "testing local file:"
try:
file_path="config.ini"
backup_file_ssh("config.ini", "1902.1", "XYZ", "config.ini", "MD5", "8e6995806b1cf27df47c5900869fdd27")
except ValueError:
print "failed to verify file (\"config.ini\")"
else:
print "file ok"
print "testing file stream:"
try:
file_size = os.stat(file_path).st_size
print ("file size: %d" % file_size)
file_stream = io.open("config.ini", "rb")
backup_file_ssh(file_stream, "1902.1", "XYZ", "config.ini", "MD5", "8e6995806b1cf27df47c5900869fdd27", file_size)
except ValueError:
print "failed to verify file (\"config.ini\")"
else:
print "file ok"
if __name__ == "__main__":
main()
| {
"content_hash": "ba8b75c355151e52f3be83af57cbdff6",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 135,
"avg_line_length": 33.10204081632653,
"alnum_prop": 0.6329634196465269,
"repo_name": "JayanthyChengan/dataverse",
"id": "3355b9cffb20656ff74871d2306c796a0514bc4a",
"size": "4901",
"binary": false,
"copies": "8",
"ref": "refs/heads/dataverse-contactform-afflist",
"path": "scripts/backup/run_backup/backup_ssh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "48137"
},
{
"name": "HTML",
"bytes": "781240"
},
{
"name": "Java",
"bytes": "4153477"
},
{
"name": "JavaScript",
"bytes": "390936"
},
{
"name": "Makefile",
"bytes": "3299"
},
{
"name": "Perl",
"bytes": "58151"
},
{
"name": "Python",
"bytes": "58198"
},
{
"name": "R",
"bytes": "36411"
},
{
"name": "Ruby",
"bytes": "1670"
},
{
"name": "Shell",
"bytes": "96276"
},
{
"name": "XSLT",
"bytes": "455"
}
],
"symlink_target": ""
} |
import edl.batch
def test_chunk_size_calcs():
assert edl.batch.calculateChunkSize(100,100,10) == 10
assert edl.batch.calculateChunkSize(1000,100,10) == 100
assert edl.batch.calculateChunkSize(100,100,9) == 12
assert edl.batch.calculateChunkSize(1000,1000,9) == 112
| {
"content_hash": "431c0a6f078958d6ae857bde63b5a201",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 59,
"avg_line_length": 36,
"alnum_prop": 0.7152777777777778,
"repo_name": "jmeppley/py-metagenomics",
"id": "aa2694ba5f761b01473899358f1cff00eebfe852",
"size": "288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_11_batch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "28823"
},
{
"name": "Perl",
"bytes": "4389"
},
{
"name": "Python",
"bytes": "426256"
},
{
"name": "Roff",
"bytes": "4605"
},
{
"name": "Shell",
"bytes": "66740"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
from datetime import datetime
# third-party imports
import six
import psutil
# using psutil.Process.wait() in unconventional manner
from psutil import TimeoutExpired as ProcessNotFinished
def source(script, old_env=None):
"""Emulates source in bash and returns the resulting environment object.
"""
if not os.path.isfile(script):
return None
# Force use of bash shell
print_env_cmd = 'env; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"; echo "DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH"'
cmd = ['/bin/bash', '-c', 'source %s; %s' % (script, print_env_cmd)]
stdout = subprocess.check_output(cmd, env=old_env, universal_newlines=True)
return dict((line.split('=', 1) for line in stdout.splitlines() if len(line.split('=', 1)) == 2))
def execute(cmd, stdin=None, stdout=None, stderr=subprocess.STDOUT,
cwd=None, env=None):
"""Execute command as child process.
Args:
cmd: either a string containing the entire command to be executed, or
a sequence of program arguments.
Other arguments are the usual subprocess.Popen() arguments.
Returns:
Instance of the psutil.Popen class. This provides the methods of the
subprocess.Popen and psutil.Process classes in a single interface.
"""
# Tokenize string into arguments
if isinstance(cmd, six.string_types):
import shlex
cmd = shlex.split(cmd)
# Don't pass kwargs because I want to limit functionality
return psutil.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr,
cwd=cwd, env=env)
def monitor(proc, timeout=None, min_dt=1, max_ndata=10):
"""Monitor the status of a process and record performance data. If the
number of measurements exceeds max_ndata, the data is resampled and the
time between measurements is increased.
Args:
proc: instance of psutil.Popen class.
timeout: time after which process will be killed [seconds].
min_dt: minimum time between performance measurements [seconds].
max_ndata: maximum number of performace measurements.
Returns:
(exit_code, performance): where performance is a dictionary containing
data relating to the process performance (duration, maximum memory
usage, and performance measurements made throughout execution).
Duration is set to None if the process times out.
"""
resampling_factor = 2
time_init = datetime.fromtimestamp(proc.create_time())
dt = min_dt
ndata = 0
data = {}
max_memory = 0.0
# This block uses the psutil.Process.wait() method in an unconventional
# manner, in order to precisely determine the process duration, whilst
# also choosing the sampling rate of performance measurements. Please only
# edit if you fully understand how this works, as it is easy to break.
while True:
try:
exit_code = proc.wait(dt)
# Process has finished
duration = (datetime.now() - time_init).total_seconds()
break
# Process has not finished
except ProcessNotFinished:
t = (datetime.now() - time_init).total_seconds()
# Measure performance
try:
datum = _measure_performance(proc, t)
for k in datum.keys():
if k not in data:
data[k] = []
data[k].append(datum[k])
ndata += 1
max_memory = max(max_memory, datum['memory_MB'])
except (psutil.AccessDenied, psutil.NoSuchProcess):
continue
# Kill process if it exceeds timeout period
if timeout and t >= timeout:
proc.kill()
exit_code = 0
duration = None
break
# Resample data if necessary
if ndata >= max_ndata:
for arr in data.values():
del arr[::resampling_factor]
ndata = len(data)
dt *= resampling_factor
data['duration'] = duration
data['max_memory_MB'] = max_memory
return (exit_code, data)
def _measure_performance(proc, time):
"""Measure performance statistics of process.
Args:
proc: instance of psutil.Popen class.
time: time at which measurement is made (is appended to data).
Returns:
dict containing performance data at this time.
"""
datum = {
'time': time,
'cpu_pcnt': proc.cpu_percent(),
'memory_MB': float(proc.memory_info().rss) / 1024 / 1024,
}
if not sys.platform.startswith('darwin') and \
not sys.platform.lower().startswith('sunos'):
read_MB = float(proc.io_counters().read_bytes) / 1024 / 1024
write_MB = float(proc.io_counters().write_bytes) / 1024 / 1024
datum['read_MB'] = read_MB
datum['write_MB'] = write_MB
return datum
| {
"content_hash": "34e92228eb597bcf958dcdb85161044e",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 111,
"avg_line_length": 33.96621621621622,
"alnum_prop": 0.6150785756912671,
"repo_name": "davidchall/nrtest",
"id": "66ae6160e070dd57ae423c56782df2a913147be1",
"size": "5069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nrtest/process.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1996"
},
{
"name": "Python",
"bytes": "34545"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# @lint-avoid-pyflakes2
# @lint-avoid-pyflakes3
import unittest
import math
from thrift.protocol import TSimpleJSONProtocol
from thrift.transport.TTransport import TMemoryBuffer
from thrift.util import Serializer
from SimpleJSONRead.ttypes import SomeStruct, Stuff
def writeToJSON(obj):
trans = TMemoryBuffer()
proto = TSimpleJSONProtocol.TSimpleJSONProtocol(trans)
obj.write(proto)
return trans.getvalue()
def readStuffFromJSON(jstr, struct_type=Stuff):
stuff = struct_type()
trans = TMemoryBuffer(jstr)
proto = TSimpleJSONProtocol.TSimpleJSONProtocol(trans,
struct_type.thrift_spec)
stuff.read(proto)
return stuff
class TestSimpleJSONRead(unittest.TestCase):
def test_primitive_type(self):
stuff = Stuff(
aString="hello",
aShort=10,
anInteger=23990,
aLong=123456789012,
aDouble=1234567.9,
aBool=True)
j = writeToJSON(stuff)
stuff_read = readStuffFromJSON(j)
self.assertEqual(stuff_read.aString, "hello")
self.assertEqual(stuff_read.aShort, 10)
self.assertEqual(stuff_read.anInteger, 23990)
self.assertEqual(stuff_read.aLong, 123456789012)
self.assertEqual(stuff_read.aDouble, 1234567.9)
self.assertTrue(stuff_read.aBool)
def test_escape_string(self):
stuff = Stuff(
aString=b'\\hello')
j = writeToJSON(stuff)
stuff_read = readStuffFromJSON(j)
self.assertEqual(stuff_read.aString, '\\hello')
def test_unusual_numbers(self):
j = '{ "aListOfDouble": ["inf", "-inf", "nan"]}'
stuff_read = readStuffFromJSON(j)
self.assertEqual(len(stuff_read.aListOfDouble), 3)
self.assertTrue(math.isinf(stuff_read.aListOfDouble[0]))
self.assertTrue(math.isinf(stuff_read.aListOfDouble[1]))
self.assertTrue(math.isnan(stuff_read.aListOfDouble[2]))
def test_unexpected_field(self):
ss = SomeStruct(anInteger=1)
j = '{ "anInteger": 101, "unexpected": 111.1}'
struct_read = readStuffFromJSON(j, struct_type=SomeStruct)
self.assertEqual(struct_read.anInteger, 101)
def test_map(self):
stuff = Stuff(
aMap={1: {"hello": [1,2,3,4],
"world": [5,6,7,8]},
2: {"good": [100, 200],
"bye": [300, 400]}
},
anotherString="Hey")
j = writeToJSON(stuff)
stuff_read = readStuffFromJSON(j)
self.assertEqual(len(stuff_read.aMap), 2)
self.assertEqual(stuff_read.aMap[1]["hello"], [1,2,3,4])
self.assertEqual(stuff_read.aMap[1]["world"], [5,6,7,8])
self.assertEqual(stuff_read.aMap[2]["good"], [100, 200])
self.assertEqual(stuff_read.aMap[2]["bye"], [300, 400])
self.assertEqual(stuff_read.anotherString, "Hey")
def test_list(self):
stuff = Stuff(
aList=[
[[["hello", "world"], ["good", "bye"]]],
[[["what", "is"], ["going", "on"]]]],
anotherString="Hey")
j = writeToJSON(stuff)
stuff_read = readStuffFromJSON(j)
self.assertEqual(len(stuff_read.aList), 2)
self.assertEqual(stuff_read.aList[0][0][0], ["hello", "world"])
self.assertEqual(stuff_read.aList[0][0][1], ["good", "bye"])
self.assertEqual(stuff_read.aList[1][0][0], ["what", "is"])
self.assertEqual(stuff_read.aList[1][0][1], ["going", "on"])
self.assertEqual(stuff_read.anotherString, "Hey")
def test_set(self):
stuff = Stuff(
aListOfSet=[set(["hello"]), set(["world"])],
anotherString="Hey")
j = writeToJSON(stuff)
stuff_read = readStuffFromJSON(j)
self.assertEqual(len(stuff_read.aListOfSet), 2)
self.assertEqual(stuff_read.aListOfSet[0], set(["hello"]))
self.assertEqual(stuff_read.aListOfSet[1], set(["world"]))
self.assertEqual(stuff_read.anotherString, "Hey")
def test_struct(self):
stuff = Stuff(
aStruct=SomeStruct(anInteger=12,
aMap={"hi": 1.5}),
aListOfStruct=[
SomeStruct(anInteger=10,
aMap={"good": 2.0}),
SomeStruct(anInteger=11,
aMap={"bye": 1.0})],
anotherString="Hey"
)
j = writeToJSON(stuff)
stuff_read = readStuffFromJSON(j)
self.assertEqual(len(stuff_read.aListOfStruct), 2)
self.assertEqual(stuff_read.aListOfStruct[0].anInteger, 10)
self.assertEqual(stuff_read.aListOfStruct[0].aMap["good"], 2.0)
self.assertEqual(stuff_read.aListOfStruct[1].anInteger, 11)
self.assertEqual(stuff_read.aListOfStruct[1].aMap["bye"], 1.0)
self.assertEqual(stuff_read.anotherString, "Hey")
def test_deserializer(self):
j = '{"aShort": 1, "anInteger": 2, "aLong": 3}'
stuff = Stuff()
Serializer.deserialize(
TSimpleJSONProtocol.TSimpleJSONProtocolFactory(), j, stuff)
self.assertEqual(stuff.aShort, 1)
self.assertEqual(stuff.anInteger, 2)
self.assertEqual(stuff.aLong, 3)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "aa28b279c702e2acb2007abdb1813a82",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 76,
"avg_line_length": 38.75694444444444,
"alnum_prop": 0.5826912739652392,
"repo_name": "getyourguide/fbthrift",
"id": "9e55f1f6063b96281250c1646f351ee9589b20cd",
"size": "5581",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thrift/test/py/SimpleJSONReadTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "154349"
},
{
"name": "C#",
"bytes": "28929"
},
{
"name": "C++",
"bytes": "17798156"
},
{
"name": "CMake",
"bytes": "33182"
},
{
"name": "D",
"bytes": "669764"
},
{
"name": "Emacs Lisp",
"bytes": "5154"
},
{
"name": "Erlang",
"bytes": "23039"
},
{
"name": "Go",
"bytes": "375816"
},
{
"name": "HTML",
"bytes": "404999"
},
{
"name": "Hack",
"bytes": "768869"
},
{
"name": "Haskell",
"bytes": "305707"
},
{
"name": "Java",
"bytes": "2408919"
},
{
"name": "JavaScript",
"bytes": "6018"
},
{
"name": "Lex",
"bytes": "11934"
},
{
"name": "M4",
"bytes": "99563"
},
{
"name": "Makefile",
"bytes": "53670"
},
{
"name": "OCaml",
"bytes": "32043"
},
{
"name": "Objective-C",
"bytes": "152361"
},
{
"name": "PHP",
"bytes": "322092"
},
{
"name": "Perl",
"bytes": "70682"
},
{
"name": "Protocol Buffer",
"bytes": "585"
},
{
"name": "Python",
"bytes": "2413275"
},
{
"name": "Ruby",
"bytes": "328584"
},
{
"name": "Shell",
"bytes": "32559"
},
{
"name": "Smalltalk",
"bytes": "22812"
},
{
"name": "TeX",
"bytes": "48707"
},
{
"name": "Thrift",
"bytes": "259661"
},
{
"name": "Vim script",
"bytes": "2837"
},
{
"name": "Yacc",
"bytes": "36158"
}
],
"symlink_target": ""
} |
import urllib2
import json
from time import time
from plugins.plugin import Plugin
from bytebot_config import BYTEBOT_HTTP_TIMEOUT, BYTEBOT_HTTP_MAXSIZE
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
class fuel(Plugin):
def __init__(self):
pass
def registerCommand(self, irc):
irc.registerCommand("!fuel", "Treibstoffpreise")
def _get_fuel_stations(self):
url = "https://creativecommons.tankerkoenig.de/json/list.php?" + \
"lat=50.9827792" + \
"&lng=11.0394426" + \
"&rad=15" + \
"&sort=dist" + \
"&type=e5&apikey=" + \
str(BYTEBOT_PLUGIN_CONFIG["fuel"]["apikey"])
data = urllib2.urlopen(url, timeout=BYTEBOT_HTTP_TIMEOUT).read(
BYTEBOT_HTTP_MAXSIZE)
return json.loads(data)
def _get_fuel_stations_details(self, station_id):
url = "https://creativecommons.tankerkoenig.de/json/detail.php?" + \
"id=" + station_id + \
"&apikey=" + str(BYTEBOT_PLUGIN_CONFIG["fuel"]["apikey"])
data = urllib2.urlopen(url, timeout=BYTEBOT_HTTP_TIMEOUT).read(
BYTEBOT_HTTP_MAXSIZE)
return json.loads(data)
def onPrivmsg(self, irc, msg, channel, user):
if msg.find("!fuel") == -1:
return
self.irc = irc
self.channel = channel
try:
last_fuel = irc.last_fuel
except Exception:
last_fuel = 0
if last_fuel < (time() - 60):
try:
data = self._get_fuel_stations()
except Exception:
irc.msg(channel, "Error while fetching data.")
if len(data) == 0:
irc.msg(channel, "'I'm sorry, no fuel data.")
return
messages = []
for x in range(len(data['stations'])):
brand = data[u'stations'][x][u"brand"]
station_id = data['stations'][x][u"id"]
postCode = data['stations'][x][u"postCode"]
data_details = self._get_fuel_stations_details(station_id)
e5 = data_details['station']['e5']
e10 = data_details['station']['e10']
diesel = data_details['station']['diesel']
if brand == '':
brand = 'GLOBUS'
print_str = \
u" {:20}".format(brand + ', ' + str(postCode) + ': ') + \
u"{:5} ".format(e5) + \
u"{:5} ".format(e10) + \
u"{:5} ".format(diesel)
messages.append(print_str)
headline = u"{:23}".format('fuel prices:') + \
u"{:6} ".format('e5') + \
u"{:6} ".format('e10') + \
u"{:6} ".format('diesel')
irc.msg(channel, headline.encode("utf-8", "ignore"))
for m in messages:
irc.msg(channel, m.encode("utf-8", "ignore"))
irc.last_fuel = time()
else:
irc.msg(channel, "Don't overdo it ;)")
| {
"content_hash": "cb5e2957aa95b481f73ab71493c4b127",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 79,
"avg_line_length": 31.07070707070707,
"alnum_prop": 0.49479843953185954,
"repo_name": "petrk94/Bytebot",
"id": "60a6eea1f9dadca8c23e2d7cf54c7feea8829514",
"size": "3124",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/fuel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62049"
}
],
"symlink_target": ""
} |
import numpy as np
def shaped_range(*shape, dtype=np.float32):
r = np.arange(np.prod(shape))
r = r.reshape(shape)
return r
def _increasing_impl(*shape, dtype=np.float32, negative=True, bias=0):
r = shaped_range(*shape, dtype=dtype)
if negative:
r -= r.size // 2
if dtype in (np.float32, np.float64):
r = r * 0.5
r += bias
return r.astype(dtype)
def increasing(*shape, dtype=np.float32):
"""Returns a monotonically increasing ndarray for test inputs.
The output will contain both zero, negative numbers, and non
integer numbers for float dtypes. A test writer is supposed to
consider this function first.
Example:
>>> onnx_chainer.testing.input_generator.increasing(3, 4)
array([[-3. , -2.5, -2. , -1.5],
[-1. , -0.5, 0. , 0.5],
[ 1. , 1.5, 2. , 2.5]], dtype=float32)
Args:
shape (tuple of int): The shape of the output array.
dtype (numpy.dtype): The dtype of the output array.
Returns:
numpy.ndarray
"""
return _increasing_impl(*shape, dtype=dtype)
def nonzero_increasing(*shape, dtype=np.float32, bias=1e-7):
"""Returns a monotonically increasing ndarray for test inputs.
Similar to `increasing` but contains no zeros. Expected to be used
for divisors.
Example:
>>> onnx_chainer.testing.input_generator.nonzero_increasing(3, 4)
array([[-3.0000000e+00, -2.5000000e+00, -1.9999999e+00, -1.4999999e+00],
[-9.9999988e-01, -4.9999991e-01, 1.0000000e-07, 5.0000012e-01],
[ 1.0000001e+00, 1.5000001e+00, 2.0000000e+00, 2.5000000e+00]],
dtype=float32)
Args:
shape (tuple of int): The shape of the output array.
dtype (numpy.dtype): The dtype of the output array.
bias (float): The bias to avoid zero.
Returns:
numpy.ndarray
"""
assert dtype in (np.float32, np.float64)
return _increasing_impl(*shape, dtype=dtype, bias=bias)
def positive_increasing(*shape, dtype=np.float32, bias=1e-7):
"""Returns a monotonically increasing ndarray for test inputs.
Similar to `increasing` but contains only positive numbers. Expected
to be used for `math.log`, `math.sqrt`, etc.
Example:
>>> onnx_chainer.testing.input_generator.positive_increasing(3, 4)
array([[1.0000000e-07, 5.0000012e-01, 1.0000001e+00, 1.5000001e+00],
[2.0000000e+00, 2.5000000e+00, 3.0000000e+00, 3.5000000e+00],
[4.0000000e+00, 4.5000000e+00, 5.0000000e+00, 5.5000000e+00]],
dtype=float32)
Args:
shape (tuple of int): The shape of the output array.
dtype (numpy.dtype): The dtype of the output array.
bias (float): The bias to avoid zero.
Returns:
numpy.ndarray
"""
return _increasing_impl(*shape, dtype=dtype, negative=False, bias=bias)
| {
"content_hash": "5ca02034d51263ed2b137738bf0935fe",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 77,
"avg_line_length": 31.26086956521739,
"alnum_prop": 0.631432545201669,
"repo_name": "pfnet/chainer",
"id": "0d4010c91a6282ab8acbbb9e0ecb92b0ad06f6e2",
"size": "2876",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "onnx_chainer/testing/input_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2564338"
}
],
"symlink_target": ""
} |
def test_spam(spam):
assert spam == "spamspam"
| {
"content_hash": "817272b88d5647f3242ecc7f7096e02f",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 29,
"avg_line_length": 25.5,
"alnum_prop": 0.6470588235294118,
"repo_name": "kawamon/hue",
"id": "65690c49f23153fdd3c631e6c9a08825fe222c8a",
"size": "75",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/pytest-4.6.11/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/pkg/test_spam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
from nose.tools import (assert_false, assert_equal, assert_in, assert_true)
import mock
import pytest
import unittest
import httplib as http
from addons.base.tests.views import OAuthAddonConfigViewsTestCaseMixin
from addons.dataverse.models import DataverseProvider
from addons.dataverse.tests.utils import (
create_mock_connection, DataverseAddonTestCase, create_external_account,
)
from framework.auth.decorators import Auth
from tests.factories import AuthUserFactory
from tests.base import OsfTestCase
from addons.dataverse.serializer import DataverseSerializer
from website.util import api_url_for
pytestmark = pytest.mark.django_db
class TestAuthViews(DataverseAddonTestCase, OsfTestCase, unittest.TestCase):
def test_deauthorize(self):
url = api_url_for('dataverse_deauthorize_node',
pid=self.project._primary_key)
self.app.delete(url, auth=self.user.auth)
self.node_settings.reload()
assert_false(self.node_settings.dataverse_alias)
assert_false(self.node_settings.dataverse)
assert_false(self.node_settings.dataset_doi)
assert_false(self.node_settings.dataset)
assert_false(self.node_settings.user_settings)
# Log states that node was deauthorized
self.project.reload()
last_log = self.project.logs.latest()
assert_equal(last_log.action, 'dataverse_node_deauthorized')
log_params = last_log.params
assert_equal(log_params['node'], self.project._primary_key)
assert_equal(log_params['project'], None)
def test_user_config_get(self):
url = api_url_for('dataverse_user_config_get')
new_user = AuthUserFactory.build()
res = self.app.get(url, auth=new_user.auth)
result = res.json.get('result')
assert_false(result['userHasAuth'])
assert_in('hosts', result)
assert_in('create', result['urls'])
# userHasAuth is true with external accounts
new_user.external_accounts.add(create_external_account())
new_user.save()
res = self.app.get(url, auth=self.user.auth)
result = res.json.get('result')
assert_true(result['userHasAuth'])
class TestConfigViews(DataverseAddonTestCase, OAuthAddonConfigViewsTestCaseMixin, OsfTestCase):
connection = create_mock_connection()
Serializer = DataverseSerializer
client = DataverseProvider
def setUp(self):
super(TestConfigViews, self).setUp()
self.mock_ser_api = mock.patch('addons.dataverse.serializer.client.connect_from_settings')
self.mock_ser_api.return_value = create_mock_connection()
self.mock_ser_api.start()
def tearDown(self):
self.mock_ser_api.stop()
super(TestConfigViews, self).tearDown()
@mock.patch('addons.dataverse.views.client.connect_from_settings')
def test_folder_list(self, mock_connection):
#test_get_datasets
mock_connection.return_value = self.connection
url = api_url_for('dataverse_get_datasets', pid=self.project._primary_key)
params = {'alias': 'ALIAS1'}
res = self.app.post_json(url, params, auth=self.user.auth)
assert_equal(len(res.json['datasets']), 3)
first = res.json['datasets'][0]
assert_equal(first['title'], 'Example (DVN/00001)')
assert_equal(first['doi'], 'doi:12.3456/DVN/00001')
@mock.patch('addons.dataverse.views.client.connect_from_settings')
def test_set_config(self, mock_connection):
mock_connection.return_value = self.connection
url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))
res = self.app.post_json(url, {
'dataverse': {'alias': 'ALIAS3'},
'dataset': {'doi': 'doi:12.3456/DVN/00003'},
}, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
self.project.reload()
assert_equal(
self.project.logs.latest().action,
'{0}_dataset_linked'.format(self.ADDON_SHORT_NAME)
)
assert_equal(res.json['dataverse'], self.connection.get_dataverse('ALIAS3').title)
assert_equal(res.json['dataset'],
self.connection.get_dataverse('ALIAS3').get_dataset_by_doi('doi:12.3456/DVN/00003').title)
def test_get_config(self):
url = self.project.api_url_for('{0}_get_config'.format(self.ADDON_SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
assert_in('result', res.json)
serialized = self.Serializer().serialize_settings(
self.node_settings,
self.user,
)
assert_equal(serialized, res.json['result'])
@mock.patch('addons.dataverse.views.client.connect_from_settings')
def test_set_config_no_dataset(self, mock_connection):
mock_connection.return_value = self.connection
num_old_logs = self.project.logs.count()
url = api_url_for('dataverse_set_config',
pid=self.project._primary_key)
params = {
'dataverse': {'alias': 'ALIAS3'},
'dataset': {}, # The dataverse has no datasets
}
# Select a different dataset
res = self.app.post_json(url, params, auth=self.user.auth,
expect_errors=True)
self.node_settings.reload()
# Old settings did not change
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(self.node_settings.dataverse_alias, 'ALIAS2')
assert_equal(self.node_settings.dataset, 'Example (DVN/00001)')
assert_equal(self.node_settings.dataset_doi, 'doi:12.3456/DVN/00001')
# Nothing was logged
self.project.reload()
assert_equal(self.project.logs.count(), num_old_logs)
class TestHgridViews(DataverseAddonTestCase, OsfTestCase, unittest.TestCase):
@mock.patch('addons.dataverse.views.client.get_custom_publish_text')
@mock.patch('addons.dataverse.views.client.connect_from_settings')
@mock.patch('addons.dataverse.views.client.get_files')
def test_dataverse_root_published(self, mock_files, mock_connection, mock_text):
mock_connection.return_value = create_mock_connection()
mock_files.return_value = ['mock_file']
mock_text.return_value = 'Do you want to publish?'
self.project.set_privacy('public')
self.project.save()
alias = self.node_settings.dataverse_alias
doi = self.node_settings.dataset_doi
external_account = create_external_account()
self.user.external_accounts.add(external_account)
self.user.save()
self.node_settings.set_auth(external_account, self.user)
self.node_settings.dataverse_alias = alias
self.node_settings.dataset_doi = doi
self.node_settings.save()
url = api_url_for('dataverse_root_folder',
pid=self.project._primary_key)
# Contributor can select between states, current state is correct
res = self.app.get(url, auth=self.user.auth)
assert_true(res.json[0]['permissions']['edit'])
assert_true(res.json[0]['hasPublishedFiles'])
assert_equal(res.json[0]['version'], 'latest-published')
# Non-contributor gets published version, no options
user2 = AuthUserFactory()
res = self.app.get(url, auth=user2.auth)
assert_false(res.json[0]['permissions']['edit'])
assert_true(res.json[0]['hasPublishedFiles'])
assert_equal(res.json[0]['version'], 'latest-published')
@mock.patch('addons.dataverse.views.client.get_custom_publish_text')
@mock.patch('addons.dataverse.views.client.connect_from_settings')
@mock.patch('addons.dataverse.views.client.get_files')
def test_dataverse_root_not_published(self, mock_files, mock_connection, mock_text):
mock_connection.return_value = create_mock_connection()
mock_files.return_value = []
mock_text.return_value = 'Do you want to publish?'
self.project.set_privacy('public')
self.project.save()
alias = self.node_settings.dataverse_alias
doi = self.node_settings.dataset_doi
external_account = create_external_account()
self.user.external_accounts.add(external_account)
self.user.save()
self.node_settings.set_auth(external_account, self.user)
self.node_settings.dataverse_alias = alias
self.node_settings.dataset_doi = doi
self.node_settings.save()
url = api_url_for('dataverse_root_folder',
pid=self.project._primary_key)
# Contributor gets draft, no options
res = self.app.get(url, auth=self.user.auth)
assert_true(res.json[0]['permissions']['edit'])
assert_false(res.json[0]['hasPublishedFiles'])
assert_equal(res.json[0]['version'], 'latest')
# Non-contributor gets nothing
user2 = AuthUserFactory()
res = self.app.get(url, auth=user2.auth)
assert_equal(res.json, [])
@mock.patch('addons.dataverse.views.client.connect_from_settings')
@mock.patch('addons.dataverse.views.client.get_files')
def test_dataverse_root_no_connection(self, mock_files, mock_connection):
mock_connection.return_value = create_mock_connection()
mock_files.return_value = ['mock_file']
url = api_url_for('dataverse_root_folder',
pid=self.project._primary_key)
mock_connection.return_value = None
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json, [])
def test_dataverse_root_incomplete(self):
self.node_settings.dataset_doi = None
self.node_settings.save()
url = api_url_for('dataverse_root_folder',
pid=self.project._primary_key)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json, [])
class TestCrudViews(DataverseAddonTestCase, OsfTestCase, unittest.TestCase):
@mock.patch('addons.dataverse.views.client.connect_from_settings_or_401')
@mock.patch('addons.dataverse.views.client.publish_dataset')
@mock.patch('addons.dataverse.views.client.publish_dataverse')
def test_dataverse_publish_dataset(self, mock_publish_dv, mock_publish_ds, mock_connection):
mock_connection.return_value = create_mock_connection()
url = api_url_for('dataverse_publish_dataset',
pid=self.project._primary_key)
self.app.put_json(url, params={'publish_both': False}, auth=self.user.auth)
# Only dataset was published
assert_false(mock_publish_dv.called)
assert_true(mock_publish_ds.called)
@mock.patch('addons.dataverse.views.client.connect_from_settings_or_401')
@mock.patch('addons.dataverse.views.client.publish_dataset')
@mock.patch('addons.dataverse.views.client.publish_dataverse')
def test_dataverse_publish_both(self, mock_publish_dv, mock_publish_ds, mock_connection):
mock_connection.return_value = create_mock_connection()
url = api_url_for('dataverse_publish_dataset',
pid=self.project._primary_key)
self.app.put_json(url, params={'publish_both': True}, auth=self.user.auth)
# Both Dataverse and dataset were published
assert_true(mock_publish_dv.called)
assert_true(mock_publish_ds.called)
class TestDataverseRestrictions(DataverseAddonTestCase, OsfTestCase):
def setUp(self):
super(DataverseAddonTestCase, self).setUp()
# Nasty contributor who will try to access content that he shouldn't
# have access to
self.contrib = AuthUserFactory()
self.project.add_contributor(self.contrib, auth=Auth(self.user))
self.project.save()
@mock.patch('addons.dataverse.views.client.connect_from_settings')
def test_restricted_set_dataset_not_owner(self, mock_connection):
mock_connection.return_value = create_mock_connection()
# Contributor has dataverse auth, but is not the node authorizer
self.contrib.add_addon('dataverse')
self.contrib.save()
url = api_url_for('dataverse_set_config', pid=self.project._primary_key)
params = {
'dataverse': {'alias': 'ALIAS1'},
'dataset': {'doi': 'doi:12.3456/DVN/00002'},
}
res = self.app.post_json(url, params, auth=self.contrib.auth,
expect_errors=True)
assert_equal(res.status_code, http.FORBIDDEN)
| {
"content_hash": "95f4ad3601f712abc9a0cd9724c22a57",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 102,
"avg_line_length": 41.226229508196724,
"alnum_prop": 0.6550819150628281,
"repo_name": "cwisecarver/osf.io",
"id": "1ae817c80de1794831fecf68909d8ca3bbb81d3b",
"size": "12598",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "addons/dataverse/tests/test_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144027"
},
{
"name": "HTML",
"bytes": "217501"
},
{
"name": "JavaScript",
"bytes": "1712859"
},
{
"name": "Mako",
"bytes": "622293"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7621431"
}
],
"symlink_target": ""
} |
import base64
import json
import logging
import re
import StringIO
import urllib
from avro import datafile, io
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse, render
from hbase import conf
from hbase.settings import DJANGO_APPS
from hbase.api import HbaseApi
from hbase.management.commands import hbase_setup
from server.hbase_lib import get_thrift_type
LOG = logging.getLogger(__name__)
def has_write_access(user):
return user.is_superuser or user.has_hue_permission(action="write", app=DJANGO_APPS[0])
def app(request):
return render('app.mako', request, {
'can_write': has_write_access(request.user)
})
# action/cluster/arg1/arg2/arg3...
def api_router(request, url): # On split, deserialize anything
def safe_json_load(raw):
try:
return json.loads(re.sub(r'(?:\")([0-9]+)(?:\")', r'\1', str(raw)))
except:
LOG.exception('failed to parse input as json')
return raw
def deserialize(data):
if type(data) == dict:
special_type = get_thrift_type(data.pop('hue-thrift-type', ''))
if special_type:
return special_type(data)
if hasattr(data, "__iter__"):
for i, item in enumerate(data):
data[i] = deserialize(item) # Sets local binding, needs to set in data
return data
decoded_url_params = [urllib.unquote(arg) for arg in re.split(r'(?<!\\)/', url.strip('/'))]
url_params = [safe_json_load((arg, request.POST.get(arg[0:16], arg))[arg[0:15] == 'hbase-post-key-'])
for arg in decoded_url_params] # Deserialize later
if request.POST.get('dest', False):
url_params += [request.FILES.get(request.REQUEST.get('dest'))]
return api_dump(HbaseApi(request.user).query(*url_params))
def api_dump(response):
ignored_fields = ('thrift_spec', '__.+__')
trunc_limit = conf.TRUNCATE_LIMIT.get()
def clean(data):
try:
json.dumps(data)
return data
except:
LOG.exception('Failed to dump data as JSON')
cleaned = {}
lim = [0]
if isinstance(data, str): # Not JSON dumpable, meaning some sort of bytestring or byte data
#detect if avro file
if(data[:3] == '\x4F\x62\x6A'):
#write data to file in memory
output = StringIO.StringIO()
output.write(data)
#read and parse avro
rec_reader = io.DatumReader()
df_reader = datafile.DataFileReader(output, rec_reader)
return json.dumps(clean([record for record in df_reader]))
return base64.b64encode(data)
if hasattr(data, "__iter__"):
if type(data) is dict:
for i in data:
cleaned[i] = clean(data[i])
elif type(data) is list:
cleaned = []
for i, item in enumerate(data):
cleaned += [clean(item)]
else:
for i, item in enumerate(data):
cleaned[i] = clean(item)
else:
for key in dir(data):
value = getattr(data, key)
if value is not None and not hasattr(value, '__call__') and sum([int(bool(re.search(ignore, key)))
for ignore in ignored_fields]) == 0:
cleaned[key] = clean(value)
return cleaned
return JsonResponse({
'data': clean(response),
'truncated': True,
'limit': trunc_limit,
})
def install_examples(request):
result = {'status': -1, 'message': ''}
if request.method != 'POST':
result['message'] = _('A POST request is required.')
else:
try:
hbase_setup.Command().handle(user=request.user)
result['status'] = 0
except Exception, e:
LOG.exception(e)
result['message'] = str(e)
return JsonResponse(result)
| {
"content_hash": "51b40c60a5462c6f684c36de80156c66",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 111,
"avg_line_length": 30.048,
"alnum_prop": 0.6094249201277955,
"repo_name": "azureplus/hue",
"id": "9477efc9d12cf54586174af2ff13246e5a31e075",
"size": "4549",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "apps/hbase/src/hbase/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2391760"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "428494"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21113316"
},
{
"name": "Java",
"bytes": "133906"
},
{
"name": "JavaScript",
"bytes": "2836295"
},
{
"name": "Makefile",
"bytes": "93726"
},
{
"name": "Mako",
"bytes": "2174194"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "32872939"
},
{
"name": "Scala",
"bytes": "178710"
},
{
"name": "Shell",
"bytes": "52923"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "101931"
},
{
"name": "XSLT",
"bytes": "357625"
}
],
"symlink_target": ""
} |
import unittest
from pushalot.factory import PushalotFactory
from pushalot.apis import APILatest
from pushalot.transport import HTTPTransport
class DummyAPI(object):
pass
class DummyTransport(object):
pass
class FactoryTests(unittest.TestCase):
def test_without_token_raises_exception(self):
with self.assertRaises(TypeError):
pushalot = PushalotFactory.create()
def test_with_token_success(self):
pushalot = PushalotFactory.create(token='some-token')
def test_invalid_api_raises_exception(self):
with self.assertRaises(RuntimeError):
pushalot = PushalotFactory.create(
token='some-token',
api=DummyAPI
)
def test_valid_api_used_success(self):
pushalot = PushalotFactory.create(
token='some-token',
api=APILatest
)
def test_factory_initiates_correct_default_api(self):
pushalot = PushalotFactory.create(
token='some-token',
)
self.assertEqual(pushalot.__class__.__name__, 'APILatest')
def test_invalid_transport_raises(self):
with self.assertRaises(RuntimeError):
pushalot = PushalotFactory.create(
token='some-token',
transport=DummyTransport
)
def test_valid_transport_success(self):
pushalot = PushalotFactory.create(
token='some-token',
transport=HTTPTransport
)
def test_factory_uses_correct_default_transport(self):
pushalot = PushalotFactory.create(
token='some-token',
)
self.assertEqual(pushalot._transport.__class__.__name__, 'HTTPTransport')
| {
"content_hash": "a052622aa3b7779f922817caf0f13e66",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 81,
"avg_line_length": 29.050847457627118,
"alnum_prop": 0.6306884480746792,
"repo_name": "bosha/pypushalot",
"id": "b11b398339ed5c092ec103fd1d341a1eb1c49e49",
"size": "1714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31278"
}
],
"symlink_target": ""
} |
''' Document Bokeh named colors.
The ``bokeh-color`` directive accepts a named color as its argument:
.. code-block:: rest
.. bokeh-color:: aliceblue
and generates a labeled color swatch as output.
.. bokeh-color:: aliceblue
The ``bokeh-color`` direction may be used explicitly, but it can also be used
in conjunction with the :ref:`bokeh.sphinxext.bokeh_autodoc` extension.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from docutils import nodes
from docutils.parsers.rst.directives import unchanged
# Bokeh imports
from bokeh.colors import named
from .bokeh_directive import BokehDirective
from .templates import COLOR_DETAIL
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'BokehColorDirective',
'setup',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class BokehColorDirective(BokehDirective):
has_content = False
required_arguments = 1
option_spec = {
'module': unchanged,
}
def run(self):
color = self.arguments[0]
html = COLOR_DETAIL.render(color=getattr(named, color).to_css(), text=color)
node = nodes.raw('', html, format="html")
return [node]
def setup(app):
''' Required Sphinx extension setup function. '''
app.add_directive_to_domain('py', 'bokeh-color', BokehColorDirective)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "8351562ac1e7e73593bfdf4a1f1c525c",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 84,
"avg_line_length": 30.404761904761905,
"alnum_prop": 0.3962411902897416,
"repo_name": "timsnyder/bokeh",
"id": "b35e9419b1313cb93e771c358ec55caba717b3da",
"size": "2885",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/sphinxext/bokeh_color.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "24877"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "54062"
},
{
"name": "JavaScript",
"bytes": "27797"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "PowerShell",
"bytes": "713"
},
{
"name": "Python",
"bytes": "3827067"
},
{
"name": "Roff",
"bytes": "495"
},
{
"name": "Shell",
"bytes": "9953"
},
{
"name": "TypeScript",
"bytes": "2145262"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/squill/shared_lair_squill.iff"
result.attribute_template_id = -1
result.stfName("lair_n","squill")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "e5b50a74109032c2b089bf5702d070ae",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 22.692307692307693,
"alnum_prop": 0.688135593220339,
"repo_name": "obi-two/Rebelion",
"id": "d9ac81d2ead3571600979d95b951db53fa1f60e8",
"size": "440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/lair/squill/shared_lair_squill.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from cms.tests.static_analysis import StaticAnalysisTest # nopyflakes
from cms.tests.admin import * # nopyflakes
from cms.tests.api import * # nopyflakes
from cms.tests.apphooks import * # nopyflakes
from cms.tests.docs import * # nopyflakes
from cms.tests.extensions import * # nopyflakes
from cms.tests.forms import * # nopyflakes
from cms.tests.i18n import * # nopyflakes
from cms.tests.mail import * # nopyflakes
from cms.tests.menu import * # nopyflakes
from cms.tests.menu_utils import * # nopyflakes
from cms.tests.multilingual import * # nopyflakes
from cms.tests.navextender import * # nopyflakes
from cms.tests.nonroot import * # nopyflakes
from cms.tests.page import * # nopyflakes
from cms.tests.permissions import * # nopyflakes
from cms.tests.permmod import * # nopyflakes
from cms.tests.placeholder import * # nopyflakes
from cms.tests.plugins import * # nopyflakes
from cms.tests.po import * # nopyflakes
from cms.tests.publisher import * # nopyflakes
from cms.tests.rendering import * # nopyflakes
from cms.tests.reversion_tests import * # nopyflakes
from cms.tests.security import * # nopyflakes
from cms.tests.settings import * # nopyflakes
from cms.tests.site import * # nopyflakes
from cms.tests.sitemap import * # nopyflakes
from cms.tests.static_placeholder import * # nopyflakes
from cms.tests.staticfiles import * # nopyflakes
from cms.tests.templatetags import * # nopyflakes
from cms.tests.toolbar import * # nopyflakes
from cms.tests.toolbar_pool import * # nopyflakes
from cms.tests.urlutils import * # nopyflakes
from cms.tests.views import * # nopyflakes
from cms.tests.management import * # nopyflakes
from cms.tests.fixture_loading import * # nopyflakes
from cms.tests.menu_page_viewperm import * # nopyflakes
from cms.tests.menu_page_viewperm_staff import * # nopyflakes
from cms.tests.nested_plugins import * # nopyflakes
from cms.tests.check import * # nopyflakes
from cms.tests.frontend import * # nopyflakes
from cms.tests.signals import * # nopyflakes
from cms.tests.no_i18n import * # nopyflakes
from cms.tests.cache import * # nopyflakes
| {
"content_hash": "83b3901089f3fa62992c7ce35b61b47d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 70,
"avg_line_length": 47.2,
"alnum_prop": 0.7660075329566854,
"repo_name": "SurfasJones/icecream-info",
"id": "057e126a9c094c334f0320697b4715a0209ccf63",
"size": "2149",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "icecream/lib/python2.7/site-packages/cms/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "288937"
},
{
"name": "JavaScript",
"bytes": "589933"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "18137514"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "10274"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import time
from .termcolor import TermColor
from . import printer_console
prcolor = printer_console.prcolor
def plog(tag, msg, showtime = True, showdate = False,
prefix = '', suffix = '', fg = TermColor.Nil, bg = TermColor.Nil):
if showtime or showdate:
now = time.localtime()
if showtime:
tag += time.strftime("[%H:%M:%S] ", now)
if showdate:
tag += time.strftime("[%Y-%m-%d] ", now)
if prefix:
prcolor("{}{}".format(tag, prefix), fg, bg)
prcolor("{}{}".format(tag, msg), fg, bg)
if suffix:
prcolor("{}{}".format(tag, suffix), fg, bg)
def perr(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<E> ', msg, showtime, showdate, prefix, suffix, TermColor.Red)
def pwarn(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<W> ', msg, showtime, showdate, prefix, suffix, TermColor.Yellow)
def bannerwarn(msg):
pwarn('!' * 160, showtime = False)
pwarn(msg, showtime = False)
pwarn('!' * 160, showtime = False)
def pinfo(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<I> ', msg, showtime, showdate, prefix, suffix, TermColor.Green)
def pdbg(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<D> ', msg, showtime, showdate, prefix, suffix, TermColor.Cyan)
| {
"content_hash": "fc49f6c62d9a027d060b7ee475d5b600",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6540350877192982,
"repo_name": "liuycsd/bypy",
"id": "8f9575e1cc01f69c902ebf43631b1324cade102d",
"size": "1556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bypy/printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187099"
},
{
"name": "Shell",
"bytes": "1589"
}
],
"symlink_target": ""
} |
import unittest
import fern
from fern.ast import NameRef
class BasicNameref(unittest.TestCase):
'''Sans actual name lookups'''
def setUp(self):
self.nr = fern.ast.NameRef('name')
def testNameMatches(self):
self.assertEqual(self.nr.name, 'name')
class SimpleLookup(unittest.TestCase):
def testSimple(self):
m = fern.ast.Map()
m['var'] = 42
m['ref'] = NameRef('var')
m.refresh()
self.assertEqual(m['ref'], 42)
class TestLookup(unittest.TestCase):
'''Besides just NameRef, this is a test of all name-lookup
facilities in Lyanna. Kinda SRP-violating...'''
def setUp(self):
'''set up a simple but comprehensive test environment
for looking up names
'''
# make a bunch of child objects
# basically:
# self.m = {
# var = 42
# one = var
# two = [var]
# three = {foo=var}
# }
self.one = NameRef('var')
self.two = fern.ast.List(); self.two.put(NameRef('var'))
self.three = fern.ast.Map(); self.three['foo'] = NameRef('var')
self.m = fern.ast.Map()
self.m['var'] = 42
self.m['one'] = self.one
self.m['two'] = self.two
self.m['three'] = self.three
self.m.refresh()
def testDirectRef(self):
self.assertEqual(self.one.eval(), 42)
def testRefThroughMap(self):
self.assertEqual(self.three['foo'], 42)
def testRefThroughList(self):
self.assertEqual(self.two[0], 42)
def testDirectRefMutated(self):
self.m['var'] = 13
self.assertEqual(self.one.eval(), 13)
def testRefThroughMapMutated(self):
self.m.set_key('var', 13)
self.m.refresh()
self.assertEqual(self.three['foo'], 13)
def testRefThroughListMutated(self):
self.m['var'] = 13
self.assertEqual(self.two[0], 13)
def testInvalidKeyReturnsUndefined(self):
invalid = NameRef('nope')
self.three['bar'] = invalid
self.assertEqual(invalid.eval(), fern.primitives.Undefined)
| {
"content_hash": "a95687e108935f5512b212f6e3fcb5ad",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 71,
"avg_line_length": 33.44444444444444,
"alnum_prop": 0.5804461319411486,
"repo_name": "andrewf/fern",
"id": "a0d2356b8f3583c0ae5b54c875904879cc061301",
"size": "2107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ast/testNameref.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55100"
}
],
"symlink_target": ""
} |
import os
import tempfile
import logging
import datetime
from azure.storage.blob import blockblobservice
from luigi.format import get_default_format
from luigi.target import FileAlreadyExists, FileSystem, AtomicLocalFile, FileSystemTarget
logger = logging.getLogger('luigi-interface')
class AzureBlobClient(FileSystem):
"""
Create an Azure Blob Storage client for authentication.
Users can create multiple storage account, each of which acts like a silo. Under each storage account, we can
create a container. Inside each container, the user can create multiple blobs.
For each account, there should be an account key. This account key cannot be changed and one can access all the
containers and blobs under this account using the account key.
Usually using an account key might not always be the best idea as the key can be leaked and cannot be revoked. The
solution to this issue is to create Shared `Access Signatures` aka `sas`. A SAS can be created for an entire
container or just a single blob. SAS can be revoked.
"""
def __init__(self, account_name=None, account_key=None, sas_token=None, **kwargs):
"""
:param str account_name:
The storage account name. This is used to authenticate requests signed with an account key\
and to construct the storage endpoint. It is required unless a connection string is given,\
or if a custom domain is used with anonymous authentication.
:param str account_key:
The storage account key. This is used for shared key authentication.
:param str sas_token:
A shared access signature token to use to authenticate requests instead of the account key.
:param dict kwargs:
A key-value pair to provide additional connection options.
* `protocol` - The protocol to use for requests. Defaults to https.
* `connection_string` - If specified, this will override all other parameters besides request session.\
See http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ for the connection string format
* `endpoint_suffix` - The host base component of the url, minus the account name. Defaults to Azure\
(core.windows.net). Override this to use the China cloud (core.chinacloudapi.cn).
* `custom_domain` - The custom domain to use. This can be set in the Azure Portal. For example, ‘www.mydomain.com’.
* `token_credential` - A token credential used to authenticate HTTPS requests. The token value should be updated before its expiration.
"""
self.options = {"account_name": account_name, "account_key": account_key, "sas_token": sas_token}
self.kwargs = kwargs
@property
def connection(self):
return blockblobservice.BlockBlobService(account_name=self.options.get("account_name"),
account_key=self.options.get("account_key"),
sas_token=self.options.get("sas_token"),
protocol=self.kwargs.get("protocol"),
connection_string=self.kwargs.get("connection_string"),
endpoint_suffix=self.kwargs.get("endpoint_suffix"),
custom_domain=self.kwargs.get("custom_domain"),
is_emulated=self.kwargs.get("is_emulated") or False)
def upload(self, tmp_path, container, blob, **kwargs):
logging.debug("Uploading file '{tmp_path}' to container '{container}' and blob '{blob}'".format(
tmp_path=tmp_path, container=container, blob=blob))
self.create_container(container)
lease_id = self.connection.acquire_blob_lease(container, blob)\
if self.exists("{container}/{blob}".format(container=container, blob=blob)) else None
try:
self.connection.create_blob_from_path(container, blob, tmp_path, lease_id=lease_id, progress_callback=kwargs.get("progress_callback"))
finally:
if lease_id is not None:
self.connection.release_blob_lease(container, blob, lease_id)
def download_as_bytes(self, container, blob, bytes_to_read=None):
start_range, end_range = (0, bytes_to_read-1) if bytes_to_read is not None else (None, None)
logging.debug("Downloading from container '{container}' and blob '{blob}' as bytes".format(
container=container, blob=blob))
return self.connection.get_blob_to_bytes(container, blob, start_range=start_range, end_range=end_range).content
def download_as_file(self, container, blob, location):
logging.debug("Downloading from container '{container}' and blob '{blob}' to {location}".format(
container=container, blob=blob, location=location))
return self.connection.get_blob_to_path(container, blob, location)
def create_container(self, container_name):
return self.connection.create_container(container_name)
def delete_container(self, container_name):
lease_id = self.connection.acquire_container_lease(container_name)
self.connection.delete_container(container_name, lease_id=lease_id)
def exists(self, path):
container, blob = self.splitfilepath(path)
return self.connection.exists(container, blob)
def remove(self, path, recursive=True, skip_trash=True):
container, blob = self.splitfilepath(path)
if not self.exists(path):
return False
lease_id = self.connection.acquire_blob_lease(container, blob)
self.connection.delete_blob(container, blob, lease_id=lease_id)
return True
def mkdir(self, path, parents=True, raise_if_exists=False):
container, blob = self.splitfilepath(path)
if raise_if_exists and self.exists(path):
raise FileAlreadyExists("The Azure blob path '{blob}' already exists under container '{container}'".format(
blob=blob, container=container))
def isdir(self, path):
"""
Azure Blob Storage has no concept of directories. It always returns False
:param str path: Path of the Azure blob storage
:return: False
"""
return False
def move(self, path, dest):
try:
return self.copy(path, dest) and self.remove(path)
except IOError:
self.remove(dest)
return False
def copy(self, path, dest):
source_container, source_blob = self.splitfilepath(path)
dest_container, dest_blob = self.splitfilepath(dest)
if source_container != dest_container:
raise Exception(
"Can't copy blob from '{source_container}' to '{dest_container}'. File can be moved within container".format(
source_container=source_container, dest_container=dest_container
))
source_lease_id = self.connection.acquire_blob_lease(source_container, source_blob)
destination_lease_id = self.connection.acquire_blob_lease(dest_container, dest_blob) if self.exists(dest) else None
try:
return self.connection.copy_blob(source_container, dest_blob, self.connection.make_blob_url(
source_container, source_blob),
destination_lease_id=destination_lease_id, source_lease_id=source_lease_id)
finally:
self.connection.release_blob_lease(source_container, source_blob, source_lease_id)
if destination_lease_id is not None:
self.connection.release_blob_lease(dest_container, dest_blob, destination_lease_id)
def rename_dont_move(self, path, dest):
self.move(path, dest)
@staticmethod
def splitfilepath(filepath):
splitpath = filepath.split("/")
container = splitpath[0]
blobsplit = splitpath[1:]
blob = None if not blobsplit else "/".join(blobsplit)
return container, blob
class ReadableAzureBlobFile:
def __init__(self, container, blob, client, download_when_reading, **kwargs):
self.container = container
self.blob = blob
self.client = client
self.closed = False
self.download_when_reading = download_when_reading
self.azure_blob_options = kwargs
self.download_file_location = os.path.join(tempfile.mkdtemp(prefix=str(datetime.datetime.utcnow())), blob)
self.fid = None
def read(self, n=None):
return self.client.download_as_bytes(self.container, self.blob, n)
def __enter__(self):
if self.download_when_reading:
self.client.download_as_file(self.container, self.blob, self.download_file_location)
self.fid = open(self.download_file_location)
return self.fid
else:
return self
def __exit__(self, exc_type, exc, traceback):
self.close()
def __del__(self):
self.close()
if os._exists(self.download_file_location):
os.remove(self.download_file_location)
def close(self):
if self.download_when_reading:
if self.fid is not None and not self.fid.closed:
self.fid.close()
self.fid = None
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return False
def seek(self, offset, whence=None):
pass
class AtomicAzureBlobFile(AtomicLocalFile):
def __init__(self, container, blob, client, **kwargs):
super(AtomicAzureBlobFile, self).__init__(os.path.join(container, blob))
self.container = container
self.blob = blob
self.client = client
self.azure_blob_options = kwargs
def move_to_final_destination(self):
self.client.upload(self.tmp_path, self.container, self.blob, **self.azure_blob_options)
class AzureBlobTarget(FileSystemTarget):
"""
Create an Azure Blob Target for storing data on Azure Blob Storage
"""
def __init__(self, container, blob, client=None, format=None, download_when_reading=True, **kwargs):
"""
:param str account_name:
The storage account name. This is used to authenticate requests signed with an account key and to construct
the storage endpoint. It is required unless a connection string is given, or if a custom domain is
used with anonymous authentication.
:param str container:
The azure container in which the blob needs to be stored
:param str blob:
The name of the blob under container specified
:param str client:
An instance of :class:`.AzureBlobClient`. If none is specified, anonymous access would be used
:param str format:
An instance of :class:`luigi.format`.
:param bool download_when_reading:
Determines whether the file has to be downloaded to temporary location on disk. Defaults to `True`.
Pass the argument **progress_callback** with signature *(func(current, total))* to get real time progress of upload
"""
super(AzureBlobTarget, self).__init__(os.path.join(container, blob))
if format is None:
format = get_default_format()
self.container = container
self.blob = blob
self.client = client or AzureBlobClient()
self.format = format
self.download_when_reading = download_when_reading
self.azure_blob_options = kwargs
@property
def fs(self):
"""
The :py:class:`FileSystem` associated with :class:`.AzureBlobTarget`
"""
return self.client
def open(self, mode):
"""
Open the target for reading or writing
:param char mode:
'r' for reading and 'w' for writing.
'b' is not supported and will be stripped if used. For binary mode, use `format`
:return:
* :class:`.ReadableAzureBlobFile` if 'r'
* :class:`.AtomicAzureBlobFile` if 'w'
"""
if mode not in ('r', 'w'):
raise ValueError("Unsupported open mode '%s'" % mode)
if mode == 'r':
return self.format.pipe_reader(ReadableAzureBlobFile(self.container, self.blob, self.client, self.download_when_reading, **self.azure_blob_options))
else:
return self.format.pipe_writer(AtomicAzureBlobFile(self.container, self.blob, self.client, **self.azure_blob_options))
| {
"content_hash": "d5ca2e0c561289414982c8b51492445c",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 160,
"avg_line_length": 46.123636363636365,
"alnum_prop": 0.640807316304005,
"repo_name": "jamesmcm/luigi",
"id": "20de24224a4670810a2e5a5f8b7d258b25111828",
"size": "13302",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "luigi/contrib/azureblob.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5051"
},
{
"name": "HTML",
"bytes": "39409"
},
{
"name": "JavaScript",
"bytes": "166869"
},
{
"name": "Python",
"bytes": "1823554"
},
{
"name": "Shell",
"bytes": "2627"
}
],
"symlink_target": ""
} |
"""Symbolic configuration API of MXNet."""
from __future__ import absolute_import as _abs
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
from array import array
import ctypes
import warnings
from numbers import Number
import numpy as _numpy
from ..attribute import AttrScope
from ..base import _LIB, numeric_types, c_array, c_array_buf, c_str, c_str_array, c_handle_array
from ..base import mx_uint, py_str, string_types, integer_types
from ..base import NDArrayHandle, ExecutorHandle, SymbolHandle
from ..base import check_call, MXNetError, NotImplementedForSymbol
from ..context import Context, current_context
from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP, _GRAD_REQ_MAP
from ..ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID
from ..ndarray import _ndarray_cls
from ..executor import Executor
from . import _internal
from . import op
from ._internal import SymbolBase, _set_symbol_class
__all__ = ["Symbol", "var", "Variable", "Group", "load", "load_json",
"pow", "maximum", "minimum", "hypot", "eye", "zeros", "ones", "full", "arange",
"histogram", "split_v2"]
class Symbol(SymbolBase):
"""Symbol is symbolic graph of the mxnet."""
# disable dictionary storage, also do not have parent type.
# pylint: disable=no-member
__slots__ = []
# Make numpy functions return Symbol instead of numpy object array
__array_priority__ = 1000.0
def __repr__(self):
"""Gets a string representation of the symbol."""
name = self.name
if name is None:
name = ', '.join([i.name for i in self])
return '<%s group [%s]>' % (self.__class__.__name__, name)
else:
return '<%s %s>' % (self.__class__.__name__, name)
def __iter__(self):
"""Returns a generator object of symbol.
One can loop through the returned object list to get outputs.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a+b
>>> d = mx.sym.Variable('d')
>>> e = d+c
>>> out = e.get_children()
>>> out
<Symbol Grouped>
>>> for i in out:
... i
...
<Symbol d>
<Symbol _plus0>
"""
return (self[i] for i in self.list_outputs())
def __add__(self, other):
"""x.__add__(y) <=> x+y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_add` instead. """
if isinstance(other, Symbol):
return _internal._Plus(self, other)
if isinstance(other, Number):
return _internal._PlusScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __bool__(self):
raise NotImplementedForSymbol(self.__bool__, 'bool')
__nonzero__ = __bool__
def __iadd__(self, other):
raise NotImplementedForSymbol(self.__iadd__, '+=', other, 1)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
"""x.__sub__(y) <=> x-y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_sub` instead. """
if isinstance(other, Symbol):
return _internal._Minus(self, other)
if isinstance(other, Number):
return _internal._MinusScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __isub__(self, other):
raise NotImplementedForSymbol(self.__isub__, '-=', other)
def __rsub__(self, other):
"""x.__rsub__(y) <=> y-x
Only `NDArray` is supported for now.
Example
-------
>>> x = mx.nd.ones((2,3))*3
>>> y = mx.nd.ones((2,3))
>>> x.__rsub__(y).asnumpy()
array([[-2., -2., -2.],
[-2., -2., -2.]], dtype=float32)
"""
if isinstance(other, Number):
return _internal._RMinusScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __mul__(self, other):
"""x.__mul__(y) <=> x*y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_mul` instead. """
if isinstance(other, Symbol):
return _internal._Mul(self, other)
if isinstance(other, Number):
return _internal._MulScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __imul__(self, other):
raise NotImplementedForSymbol(self.__imul__, '*=', other)
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
"""x.__div__(y) <=> x/y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_div` instead. """
if isinstance(other, Symbol):
return _internal._Div(self, other)
if isinstance(other, Number):
return _internal._DivScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y/x
Only `NDArray` is supported for now.
Example
-------
>>> x = mx.nd.ones((2,3))*3
>>> y = mx.nd.ones((2,3))
>>> x.__rdiv__(y).asnumpy()
array([[ 0.33333334, 0.33333334, 0.33333334],
[ 0.33333334, 0.33333334, 0.33333334]], dtype=float32)
"""
if isinstance(other, Number):
return _internal._RDivScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __mod__(self, other):
"""x.__mod__(y) <=> x%y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_mod` instead. """
if isinstance(other, Symbol):
return _internal._Mod(self, other)
if isinstance(other, Number):
return _internal._ModScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rmod__(self, other):
"""x.__rmod__(y) <=> y%x
Only `NDArray` is supported for now.
Example
-------
>>> x = mx.nd.ones((2,3))*3
>>> y = mx.nd.ones((2,3))
>>> x.__rmod__(y).asnumpy()
array([[ 1., 1., 1.,
[ 1., 1., 1., dtype=float32)
"""
if isinstance(other, Number):
return _internal._RModScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __idiv__(self, other):
raise NotImplementedForSymbol(self.__idiv__, '/=', other)
def __truediv__(self, other):
return self.__div__(other)
def __rtruediv__(self, other):
return self.__rdiv__(other)
def __itruediv__(self, other):
raise NotImplementedForSymbol(self.__itruediv__, '/=', other)
def __pow__(self, other):
"""x.__pow__(y) <=> x**y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_pow` instead. """
if isinstance(other, Symbol):
return _internal._Power(self, other)
if isinstance(other, Number):
return _internal._PowerScalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rpow__(self, other):
raise NotImplementedForSymbol(self.__rpow__, 'y**x', other)
def __neg__(self):
"""x.__neg__() <=> -x
Numerical negative, element-wise.
Example
-------
>>> a = mx.sym.Variable('a')
>>> a
<Symbol a>
>>> -a
<Symbol _mulscalar0>
>>> a_neg = a.__neg__()
>>> c = a_neg*b
>>> ex = c.eval(ctx=mx.cpu(), a=mx.nd.ones([2,3]), b=mx.nd.ones([2,3]))
>>> ex[0].asnumpy()
array([[-1., -1., -1.],
[-1., -1., -1.]], dtype=float32)
"""
return self.__mul__(-1.0)
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
"""Returns a deep copy of the input object.
This function returns a deep copy of the input object including the current state
of all its parameters such as weights, biases, etc.
Any changes made to the deep copy do not reflect in the original object.
Example
-------
>>> import copy
>>> data = mx.sym.Variable('data')
>>> data_1 = copy.deepcopy(data)
>>> data_1 = 2*data
>>> data_1.tojson()
>>> data_1 is data # Data got modified
False
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolCopy(self.handle,
ctypes.byref(handle)))
return Symbol(handle)
def __eq__(self, other):
"""x.__eq__(y) <=> x==y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_equal` instead. """
if isinstance(other, Symbol):
return _internal._equal(self, other)
if isinstance(other, numeric_types):
return _internal._equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __ne__(self, other):
"""x.__ne__(y) <=> x!=y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_not_equal` instead. """
if isinstance(other, Symbol):
return _internal._not_equal(self, other)
if isinstance(other, numeric_types):
return _internal._not_equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __gt__(self, other):
"""x.__gt__(y) <=> x>y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_greater` instead. """
if isinstance(other, Symbol):
return _internal._greater(self, other)
if isinstance(other, numeric_types):
return _internal._greater_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __ge__(self, other):
"""x.__ge__(y) <=> x>=y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_greater_equal` instead. """
if isinstance(other, Symbol):
return _internal._greater_equal(self, other)
if isinstance(other, numeric_types):
return _internal._greater_equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __lt__(self, other):
"""x.__lt__(y) <=> x<y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_lesser` instead. """
if isinstance(other, Symbol):
return _internal._lesser(self, other)
if isinstance(other, numeric_types):
return _internal._lesser_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __le__(self, other):
"""x.__le__(y) <=> x<=y
Scalar input is supported.
Broadcasting is not supported. Use `broadcast_lesser_equal` instead. """
if isinstance(other, Symbol):
return _internal._lesser_equal(self, other)
if isinstance(other, numeric_types):
return _internal._lesser_equal_scalar(self, scalar=other)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __getstate__(self):
handle = self.handle
if handle is not None:
return {'handle': self.tojson()}
else:
return {'handle': None}
def __setstate__(self, state):
# pylint: disable=assigning-non-slot
handle = state['handle']
if handle is not None:
json_str = handle
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
self.handle = handle
else:
self.handle = None
def __call__(self, *args, **kwargs):
"""Composes symbol using inputs.
x.__call__(y, z) <=> x(y,z)
This function internally calls `_compose` to compose the symbol and
returns the composed symbol.
Example
-------
>>> data = mx.symbol.Variable('data')
>>> net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
>>> net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
>>> composed = net2(fc3_data=net1, name='composed')
>>> composed
<Symbol composed>
>>> called = net2.__call__(fc3_data=net1, name='composed')
>>> called
<Symbol composed>
Parameters
----------
args:
Positional arguments.
kwargs:
Keyword arguments.
Returns
-------
The resulting symbol.
"""
s = self.__copy__()
s._compose(*args, **kwargs)
return s
def _compose(self, *args, **kwargs):
"""Composes symbol using inputs.
x._compose(y, z) <=> x(y,z)
This function mutates the current symbol.
Example
-------
>>> data = mx.symbol.Variable('data')
>>> net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
>>> net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
>>> net2
<Symbol fc3>
>>> net2._compose(fc3_data=net1, name='composed')
>>> net2
<Symbol composed>
Parameters
----------
args:
Positional arguments.
kwargs:
Keyword arguments.
Returns
-------
The resulting symbol.
"""
name = kwargs.pop('name', None)
if name:
name = c_str(name)
if len(args) != 0 and len(kwargs) != 0:
raise TypeError('compose only accept input Symbols \
either as positional or keyword arguments, not both')
for arg in args:
if not isinstance(arg, Symbol):
raise TypeError('Compose expect `Symbol` as arguments')
for val in kwargs.values():
if not isinstance(val, Symbol):
raise TypeError('Compose expect `Symbol` as arguments')
num_args = len(args) + len(kwargs)
if len(kwargs) != 0:
keys = c_str_array(kwargs.keys())
args = c_handle_array(kwargs.values())
else:
keys = None
args = c_handle_array(args)
check_call(_LIB.MXSymbolCompose(
self.handle, name, num_args, keys, args))
def __getitem__(self, index):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of the input symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> a.__getitem__(0)
<Symbol a>
>>> a[0]
<Symbol a>
Parameters
----------
index : int or str
Indexing key
"""
output_count = len(self)
if isinstance(index, py_slice):
start = 0 if index.start is None else index.start
stop = output_count if index.stop is None else index.stop
step = 1 if index.step is None else index.step
return Group([self[i] for i in range(start, stop, step)])
if isinstance(index, string_types):
# Returning this list of names is expensive. Some symbols may have hundreds of outputs
output_names = self.list_outputs()
idx = None
for i, name in enumerate(output_names):
if name == index:
if idx is not None:
raise ValueError('There are multiple outputs with name \"%s\"' % index)
idx = i
if idx is None:
raise ValueError('Cannot find output that matches name \"%s\"' % index)
index = idx
if not isinstance(index, int):
raise TypeError('Symbol only support integer index to fetch i-th output')
if index >= output_count:
# Important, python determines the end by this exception
raise IndexError
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetOutput(
self.handle, mx_uint(index), ctypes.byref(handle)))
return Symbol(handle=handle)
@property
def name(self):
"""Gets name string from the symbol, this function only works for non-grouped symbol.
Returns
-------
value : str
The name of this symbol, returns ``None`` for grouped symbol.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
check_call(_LIB.MXSymbolGetName(
self.handle, ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
else:
return None
def attr(self, key):
"""Returns the attribute string for corresponding input key from the symbol.
This function only works for non-grouped symbols.
Example
-------
>>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
>>> data.attr('mood')
'angry'
Parameters
----------
key : str
The key corresponding to the desired attribute.
Returns
-------
value : str
The desired attribute value, returns ``None`` if the attribute does not exist.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
check_call(_LIB.MXSymbolGetAttr(
self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
else:
return None
def list_attr(self, recursive=False):
"""Gets all attributes from the symbol.
Example
-------
>>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
>>> data.list_attr()
{'mood': 'angry'}
Returns
-------
ret : Dict of str to str
A dictionary mapping attribute keys to values.
"""
if recursive:
raise DeprecationWarning("Symbol.list_attr with recursive=True has been deprecated. "
"Please use attr_dict instead.")
size = mx_uint()
pairs = ctypes.POINTER(ctypes.c_char_p)()
f_handle = _LIB.MXSymbolListAttrShallow
check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
return {py_str(pairs[i * 2]): py_str(pairs[i * 2 + 1]) for i in range(size.value)}
def attr_dict(self):
"""Recursively gets all attributes from the symbol and its children.
Example
-------
>>> a = mx.sym.Variable('a', attr={'a1':'a2'})
>>> b = mx.sym.Variable('b', attr={'b1':'b2'})
>>> c = a+b
>>> c.attr_dict()
{'a': {'a1': 'a2'}, 'b': {'b1': 'b2'}}
Returns
-------
ret : Dict of str to dict
There is a key in the returned dict for every child with non-empty attribute set.
For each symbol, the name of the symbol is its key in the dict
and the correspond value is that symbol's attribute list (itself a dictionary).
"""
size = mx_uint()
pairs = ctypes.POINTER(ctypes.c_char_p)()
f_handle = _LIB.MXSymbolListAttr
check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
ret = {}
for i in range(size.value):
name, key = py_str(pairs[i * 2]).split('$')
val = py_str(pairs[i * 2 + 1])
if name not in ret:
ret[name] = {}
ret[name][key] = val
return ret
def _set_attr(self, **kwargs):
"""Sets an attribute of the symbol.
For example. A._set_attr(foo="bar") adds the mapping ``"{foo: bar}"``
to the symbol's attribute dictionary.
Parameters
----------
**kwargs
The attributes to set
"""
for key, value in kwargs.items():
if not isinstance(value, string_types):
raise ValueError("Set Attr only accepts string values")
check_call(_LIB.MXSymbolSetAttr(
self.handle, c_str(key), c_str(str(value))))
def get_internals(self):
"""Gets a new grouped symbol `sgroup`. The output of `sgroup` is a list of
outputs of all of the internal nodes.
Consider the following code:
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> d = c.get_internals()
>>> d
<Symbol Grouped>
>>> d.list_outputs()
['a', 'b', '_plus4_output']
Returns
-------
sgroup : Symbol
A symbol group containing all internal and leaf nodes of the computation graph
used to compute the symbol.
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetInternals(
self.handle, ctypes.byref(handle)))
return Symbol(handle=handle)
def get_children(self):
"""Gets a new grouped symbol whose output contains
inputs to output nodes of the original symbol.
Example
-------
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.Variable('z')
>>> a = y+z
>>> b = x+a
>>> b.get_children()
<Symbol Grouped>
>>> b.get_children().list_outputs()
['x', '_plus10_output']
>>> b.get_children().get_children().list_outputs()
['y', 'z']
Returns
-------
sgroup : Symbol or None
The children of the head node. If the symbol has no
inputs then ``None`` will be returned.
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetChildren(
self.handle, ctypes.byref(handle)))
ret = Symbol(handle=handle)
if len(ret.list_outputs()) == 0:
return None
return ret
def list_arguments(self):
"""Lists all the arguments in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_arguments
['a', 'b']
Returns
-------
args : list of string
List containing the names of all the arguments required to compute the symbol.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListArguments(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def list_outputs(self):
"""Lists all the outputs in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_outputs()
['_plus12_output']
Returns
-------
list of str
List of all the outputs.
For most symbols, this list contains only the name of this symbol.
For symbol groups, this is a list with the names of all symbols
in the group.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListOutputs(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
# pylint: disable=invalid-length-returned
def __len__(self):
"""Get number of outputs for the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> len(c)
Returns
-------
len(self): Number of outputs
Number of outputs
"""
output_count = mx_uint()
check_call(_LIB.MXSymbolGetNumOutputs(self.handle, ctypes.byref(output_count)))
return output_count.value
def list_auxiliary_states(self):
"""Lists all the auxiliary states in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_auxiliary_states()
[]
Example of auxiliary states in `BatchNorm`.
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> fc2.list_auxiliary_states()
['batchnorm0_moving_mean', 'batchnorm0_moving_var']
Returns
-------
aux_states : list of str
List of the auxiliary states in input symbol.
Notes
-----
Auxiliary states are special states of symbols that do not correspond to an argument,
and are not updated by gradient descent. Common examples of auxiliary states
include the `moving_mean` and `moving_variance` in `BatchNorm`.
Most operators do not have auxiliary states.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListAuxiliaryStates(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def list_inputs(self):
"""Lists all arguments and auxiliary states of this Symbol.
Returns
-------
inputs : list of str
List of all inputs.
Examples
--------
>>> bn = mx.sym.BatchNorm(name='bn')
>>> bn.list_arguments()
['bn_data', 'bn_gamma', 'bn_beta']
>>> bn.list_auxiliary_states()
['bn_moving_mean', 'bn_moving_var']
>>> bn.list_inputs()
['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var']
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.NNSymbolListInputNames(
self.handle, 0, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def infer_type(self, *args, **kwargs):
"""Infers the type of all arguments and all outputs, given the known types
for some arguments.
This function takes the known types of some arguments in either positional way
or keyword argument way as input. It returns a tuple of `None` values
if there is not enough information to deduce the missing types.
Inconsistencies in the known types will cause an error to be raised.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> arg_types, out_types, aux_types = c.infer_type(a='float32')
>>> arg_types
[<type 'numpy.float32'>, <type 'numpy.float32'>]
>>> out_types
[<type 'numpy.float32'>]
>>> aux_types
[]
Parameters
----------
*args :
Type of known arguments in a positional way.
Unknown type can be marked as None.
**kwargs :
Keyword arguments of known types.
Returns
-------
arg_types : list of numpy.dtype or None
List of argument types.
The order is same as the order of list_arguments().
out_types : list of numpy.dtype or None
List of output types.
The order is same as the order of list_outputs().
aux_types : list of numpy.dtype or None
List of auxiliary state types.
The order is same as the order of list_auxiliary_states().
"""
try:
res = self._infer_type_impl(False, *args, **kwargs)
if res[1] is None:
arg_shapes, _, _ = self._infer_type_impl(True, *args, **kwargs)
arg_names = self.list_arguments()
unknowns = []
for name, dtype in zip(arg_names, arg_shapes):
if not dtype:
if len(unknowns) >= 10:
unknowns.append('...')
break
unknowns.append('%s: %s' % (name, str(dtype)))
warnings.warn(
"Cannot decide type for the following arguments. " +
"Consider providing them as input:\n\t" +
"\n\t".join(unknowns), stacklevel=2)
return res
except MXNetError:
print("infer_type error. Arguments:")
for i, arg in enumerate(args):
print(" #%d: %s" % (i, arg))
for k, v in kwargs.items():
print(" %s: %s" % (k, v))
raise
def infer_type_partial(self, *args, **kwargs):
"""Infers the type partially.
This functions works the same way as `infer_type`,
except that this function can return partial results.
In the following example, information about fc2 is not available. So, `infer_shape`
will return a tuple of `None` values but `infer_shape_partial` will return partial values.
Example
-------
>>> data = mx.sym.Variable('data')
>>> prev = mx.sym.Variable('prev')
>>> casted_prev = mx.sym.cast(prev, dtype='float32')
>>> out = mx.sym.Activation(data=mx.sym.elemwise_add(data, casted_prev), act_type='relu')
>>> out.list_arguments()
['data', 'prev']
>>> out.infer_type(data='float32')
(None, None, None)
>>> out.infer_type_partial(data='float32')
([numpy.float32, None], [numpy.float32], [])
>>> # infers type if you give information about prev
>>> out.infer_type(data='float32', prev='float16')
([numpy.float32, numpy.float16], [numpy.float32], [])
Parameters
----------
*args :
Type of known arguments in a positional way.
Unknown type can be marked as None.
**kwargs :
Keyword arguments of known types.
Returns
-------
arg_types : list of numpy.dtype or None
List of argument types.
The order is same as the order of list_arguments().
out_types : list of numpy.dtype or None
List of output types.
The order is same as the order of list_outputs().
aux_types : list of numpy.dtype or None
List of auxiliary state types.
The order is same as the order of list_auxiliary_states().
"""
return self._infer_type_impl(True, *args, **kwargs)
def _infer_type_impl(self, partial, *args, **kwargs):
"""The actual implementation for calling type inference API."""
# pylint: disable=too-many-locals
if len(args) != 0 and len(kwargs) != 0:
raise ValueError('Can only specify known argument \
types either by positional or kwargs way.')
sdata = []
if len(args) != 0:
keys = c_array(ctypes.c_char_p, [])
for s in args:
if s is not None:
s = _numpy.dtype(s).type
if s not in _DTYPE_NP_TO_MX:
raise TypeError('Argument need to be one of ' + str(_DTYPE_NP_TO_MX))
sdata.append(_DTYPE_NP_TO_MX[s])
else:
sdata.append(-1)
else:
str_keys = []
for k, v in kwargs.items():
v = _numpy.dtype(v).type
if v in _DTYPE_NP_TO_MX:
str_keys.append(k)
sdata.append(_DTYPE_NP_TO_MX[v])
keys = c_str_array(str_keys)
arg_type_size = mx_uint()
arg_type_data = ctypes.POINTER(ctypes.c_int)()
out_type_size = mx_uint()
out_type_data = ctypes.POINTER(ctypes.c_int)()
aux_type_size = mx_uint()
aux_type_data = ctypes.POINTER(ctypes.c_int)()
complete = ctypes.c_int()
if partial:
infer_func = _LIB.MXSymbolInferTypePartial
else:
infer_func = _LIB.MXSymbolInferType
check_call(infer_func(
self.handle,
mx_uint(len(sdata)),
keys,
c_array_buf(ctypes.c_int, array('i', sdata)),
ctypes.byref(arg_type_size),
ctypes.byref(arg_type_data),
ctypes.byref(out_type_size),
ctypes.byref(out_type_data),
ctypes.byref(aux_type_size),
ctypes.byref(aux_type_data),
ctypes.byref(complete)))
if complete.value != 0:
arg_types = [
_DTYPE_MX_TO_NP[arg_type_data[i]] for i in range(arg_type_size.value)]
out_types = [
_DTYPE_MX_TO_NP[out_type_data[i]] for i in range(out_type_size.value)]
aux_types = [
_DTYPE_MX_TO_NP[aux_type_data[i]] for i in range(aux_type_size.value)]
return (arg_types, out_types, aux_types)
else:
return (None, None, None)
# pylint: enable=too-many-locals
def infer_shape(self, *args, **kwargs):
"""Infers the shapes of all arguments and all outputs given the known shapes of
some arguments.
This function takes the known shapes of some arguments in either positional way
or keyword argument way as input. It returns a tuple of `None` values
if there is not enough information to deduce the missing shapes.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3))
>>> arg_shapes
[(3L, 3L), (3L, 3L)]
>>> out_shapes
[(3L, 3L)]
>>> aux_shapes
[]
>>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None.
(None, None, None)
Inconsistencies in the known shapes will cause an error to be raised.
See the following example:
>>> data = mx.sym.Variable('data')
>>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000)
>>> out = mx.sym.Activation(data=out, act_type='relu')
>>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10)
>>> weight_shape= (1, 100)
>>> data_shape = (100, 100)
>>> out.infer_shape(data=data_shape, fc1_weight=weight_shape)
Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100)
Parameters
----------
*args :
Shape of arguments in a positional way.
Unknown shape can be marked as None.
**kwargs :
Keyword arguments of the known shapes.
Returns
-------
arg_shapes : list of tuple or None
List of argument shapes.
The order is same as the order of list_arguments().
out_shapes : list of tuple or None
List of output shapes.
The order is same as the order of list_outputs().
aux_shapes : list of tuple or None
List of auxiliary state shapes.
The order is same as the order of list_auxiliary_states().
"""
try:
res = self._infer_shape_impl(False, *args, **kwargs)
if res[1] is None:
arg_shapes, _, _ = self._infer_shape_impl(True, *args, **kwargs)
arg_names = self.list_arguments()
unknowns = []
for name, shape in zip(arg_names, arg_shapes):
if not shape or not _numpy.prod(shape):
if len(unknowns) >= 10:
unknowns.append('...')
break
unknowns.append('%s: %s' % (name, str(shape)))
warnings.warn(
"Cannot decide shape for the following arguments " +
"(0s in shape means unknown dimensions). " +
"Consider providing them as input:\n\t" +
"\n\t".join(unknowns), stacklevel=2)
return res
except MXNetError:
print("infer_shape error. Arguments:")
for i, arg in enumerate(args):
print(" #%d: %s" % (i, arg))
for k, v in kwargs.items():
print(" %s: %s" % (k, v))
raise
def infer_shape_partial(self, *args, **kwargs):
"""Infers the shape partially.
This functions works the same way as `infer_shape`,
except that this function can return partial results.
In the following example, information about fc2 is not available. So, `infer_shape`
will return a tuple of `None` values but `infer_shape_partial` will return partial values.
Example
-------
>>> data = mx.sym.Variable('data')
>>> prev = mx.sym.Variable('prev')
>>> fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=128)
>>> fc2 = mx.sym.FullyConnected(data=prev, name='fc2', num_hidden=128)
>>> out = mx.sym.Activation(data=mx.sym.elemwise_add(fc1, fc2), act_type='relu')
>>> out.list_arguments()
['data', 'fc1_weight', 'fc1_bias', 'prev', 'fc2_weight', 'fc2_bias']
>>> out.infer_shape(data=(10,64))
(None, None, None)
>>> out.infer_shape_partial(data=(10,64))
([(10L, 64L), (128L, 64L), (128L,), (), (), ()], [(10L, 128L)], [])
>>> # infers shape if you give information about fc2
>>> out.infer_shape(data=(10,64), prev=(10,128))
([(10L, 64L), (128L, 64L), (128L,), (10L, 128L), (128L, 128L), (128L,)], [(10L, 128L)], [])
Parameters
----------
*args :
Shape of arguments in a positional way.
Unknown shape can be marked as None
**kwargs :
Keyword arguments of known shapes.
Returns
-------
arg_shapes : list of tuple or None
List of argument shapes.
The order is same as the order of list_arguments().
out_shapes : list of tuple or None
List of output shapes.
The order is same as the order of list_outputs().
aux_shapes : list of tuple or None
List of auxiliary state shapes.
The order is same as the order of list_auxiliary_states().
"""
return self._infer_shape_impl(True, *args, **kwargs)
def _infer_shape_impl(self, partial, *args, **kwargs):
"""The actual implementation for calling shape inference API."""
# pylint: disable=too-many-locals
if len(args) != 0 and len(kwargs) != 0:
raise ValueError('Can only specify known argument \
shapes either by positional or kwargs way.')
sdata = []
indptr = [0]
if len(args) != 0:
keys = c_array(ctypes.c_char_p, [])
for i, s in enumerate(args):
if s is not None:
if not isinstance(s, tuple):
raise TypeError("Arguments need to be shapes (tuple), "
"but argument %d is %s." % (i, type(s)))
sdata.extend(s)
indptr.append(len(sdata))
else:
str_keys = []
for k, v in kwargs.items():
if not isinstance(v, tuple):
raise TypeError("Arguments need to be shapes (tuple), "
"but '%s' is %s." % (k, type(v)))
str_keys.append(k)
sdata.extend(v)
indptr.append(len(sdata))
keys = c_str_array(str_keys)
arg_shape_size = mx_uint()
arg_shape_ndim = ctypes.POINTER(mx_uint)()
arg_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
out_shape_size = mx_uint()
out_shape_ndim = ctypes.POINTER(mx_uint)()
out_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
aux_shape_size = mx_uint()
aux_shape_ndim = ctypes.POINTER(mx_uint)()
aux_shape_data = ctypes.POINTER(ctypes.POINTER(mx_uint))()
complete = ctypes.c_int()
if partial:
infer_func = _LIB.MXSymbolInferShapePartial
else:
infer_func = _LIB.MXSymbolInferShape
check_call(infer_func(
self.handle,
mx_uint(len(indptr) - 1),
keys,
c_array_buf(mx_uint, array('I', indptr)),
c_array_buf(mx_uint, array('I', sdata)),
ctypes.byref(arg_shape_size),
ctypes.byref(arg_shape_ndim),
ctypes.byref(arg_shape_data),
ctypes.byref(out_shape_size),
ctypes.byref(out_shape_ndim),
ctypes.byref(out_shape_data),
ctypes.byref(aux_shape_size),
ctypes.byref(aux_shape_ndim),
ctypes.byref(aux_shape_data),
ctypes.byref(complete)))
if complete.value != 0:
arg_shapes = [
tuple(arg_shape_data[i][:arg_shape_ndim[i]]) for i in range(arg_shape_size.value)]
out_shapes = [
tuple(out_shape_data[i][:out_shape_ndim[i]]) for i in range(out_shape_size.value)]
aux_shapes = [
tuple(aux_shape_data[i][:aux_shape_ndim[i]]) for i in range(aux_shape_size.value)]
return (arg_shapes, out_shapes, aux_shapes)
else:
return (None, None, None)
# pylint: enable=too-many-locals
def debug_str(self):
"""Gets a debug string of symbol.
It contains Symbol output, variables and operators in the computation graph
with their inputs, variables and attributes.
Returns
-------
string
Debug string of the symbol.
Examples
--------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.sin(a)
>>> c = 2 * a + b
>>> d = mx.sym.FullyConnected(data=c, num_hidden=10)
>>> d.debug_str()
>>> print d.debug_str()
Symbol Outputs:
output[0]=fullyconnected0(0)
Variable:a
--------------------
Op:_mul_scalar, Name=_mulscalar0
Inputs:
arg[0]=a(0) version=0
Attrs:
scalar=2
--------------------
Op:sin, Name=sin0
Inputs:
arg[0]=a(0) version=0
--------------------
Op:elemwise_add, Name=_plus0
Inputs:
arg[0]=_mulscalar0(0)
arg[1]=sin0(0)
Variable:fullyconnected0_weight
Variable:fullyconnected0_bias
--------------------
Op:FullyConnected, Name=fullyconnected0
Inputs:
arg[0]=_plus0(0)
arg[1]=fullyconnected0_weight(0) version=0
arg[2]=fullyconnected0_bias(0) version=0
Attrs:
num_hidden=10
"""
debug_str = ctypes.c_char_p()
check_call(_LIB.MXSymbolPrint(
self.handle, ctypes.byref(debug_str)))
return py_str(debug_str.value)
def save(self, fname):
"""Saves symbol to a file.
You can also use pickle to do the job if you only work on python.
The advantage of `load`/`save` functions is that the file contents are language agnostic.
This means the model saved by one language binding can be loaded by a different
language binding of `MXNet`.
You also get the benefit of being able to directly load/save from cloud storage(S3, HDFS).
Parameters
----------
fname : str
The name of the file.
- "s3://my-bucket/path/my-s3-symbol"
- "hdfs://my-bucket/path/my-hdfs-symbol"
- "/path-to/my-local-symbol"
See Also
--------
symbol.load : Used to load symbol from file.
"""
if not isinstance(fname, string_types):
raise TypeError('fname need to be string')
check_call(_LIB.MXSymbolSaveToFile(self.handle, c_str(fname)))
def tojson(self):
"""Saves symbol to a JSON string.
See Also
--------
symbol.load_json : Used to load symbol from JSON string.
"""
json_str = ctypes.c_char_p()
check_call(_LIB.MXSymbolSaveToJSON(self.handle, ctypes.byref(json_str)))
return py_str(json_str.value)
@staticmethod
def _get_ndarray_inputs(arg_key, args, arg_names, allow_missing):
"""Helper function to get NDArray lists handles from various inputs.
Parameters
----------
arg_key : str
The name of argument, used for error message.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbols.
If type is list of NDArray, the position is in the same order of arg_names.
If type is dict of str to NDArray, then it maps the name of arguments
to the corresponding NDArray,
args_names : list of string
List of argument names.
allow_missing : boolean
Whether missing argument is allowed.
When allowed, the missing handle will be set to None(null)
Returns
-------
handles : list of NDArrayHandle
The positional list of NDArrayHandles generated from input.
"""
# setup args
arg_handles = []
arg_arrays = []
if isinstance(args, list):
if len(args) != len(arg_names):
raise ValueError('Length of %s does not match the number of arguments' % arg_key)
for narr in args:
if narr is None and allow_missing:
arg_handles.append(None)
elif not isinstance(narr, NDArray):
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
else:
arg_handles.append(narr.handle)
arg_arrays = args
elif isinstance(args, dict):
for name in arg_names:
if name in args:
narr = args[name]
if not isinstance(narr, NDArray):
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
arg_handles.append(narr.handle)
arg_arrays.append(narr)
else:
if allow_missing:
arg_handles.append(None)
arg_arrays.append(None)
else:
raise ValueError('key `%s` is missing in `%s`' % (name, arg_key))
else:
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
return c_array(NDArrayHandle, arg_handles), arg_arrays
# pylint: disable=too-many-locals
def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
group2ctx=None, shared_arg_names=None, shared_exec=None,
shared_buffer=None, **kwargs):
"""Bind current symbol to get an executor, allocate all the arguments needed.
Allows specifying data types.
This function simplifies the binding procedure. You need to specify only input data shapes.
Before binding the executor, the function allocates arguments and auxiliary states
that were not explicitly specified. Allows specifying data types.
Example
-------
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.FullyConnected(x, num_hidden=4)
>>> exe = y.simple_bind(mx.cpu(), x=(5,4), grad_req='null')
>>> exe.forward()
[<NDArray 5x4 @cpu(0)>]
>>> exe.outputs[0].asnumpy()
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
>>> exe.arg_arrays
[<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]
>>> exe.grad_arrays
[<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]
Parameters
----------
ctx : Context
The device context the generated executor to run on.
grad_req: string
{'write', 'add', 'null'}, or list of str or dict of str to str, optional
To specify how we should update the gradient to the `args_grad`.
- 'write' means every time gradient is written to specified `args_grad` NDArray.
- 'add' means every time gradient is added to the specified NDArray.
- 'null' means no action is taken, the gradient may not be calculated.
type_dict : Dict of str->numpy.dtype
Input type dictionary, name->dtype
stype_dict : Dict of str->str
Input storage type dictionary, name->storage_type
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
shared_arg_names : List of string
The argument names whose `NDArray` of shared_exec can be reused for initializing
the current executor.
shared_exec : Executor
The executor whose arg_arrays, arg_arrays, grad_arrays, and aux_arrays can be
reused for initializing the current executor.
shared_buffer : Dict of string to `NDArray`
The dict mapping argument names to the `NDArray` that can be reused for initializing
the current executor. This buffer will be checked for reuse if one argument name
of the current executor is not found in `shared_arg_names`. The `NDArray` s are
expected have default storage type.
kwargs : Dict of str->shape
Input shape dictionary, name->shape
Returns
-------
executor : mxnet.Executor
The generated executor
"""
# data types
num_provided_arg_types = 0
provided_arg_type_names = ctypes.POINTER(ctypes.c_char_p)() # provided type argument names
provided_arg_type_data = ctypes.POINTER(mx_uint)() # provided types
if type_dict is not None:
provided_arg_type_names = []
provided_arg_type_data = []
for k, v in type_dict.items():
v = _numpy.dtype(v).type
if v in _DTYPE_NP_TO_MX:
provided_arg_type_names.append(k)
provided_arg_type_data.append(_DTYPE_NP_TO_MX[v])
num_provided_arg_types = mx_uint(len(provided_arg_type_names))
provided_arg_type_names = c_str_array(provided_arg_type_names)
provided_arg_type_data = c_array_buf(ctypes.c_int, array('i', provided_arg_type_data))
# storage types
num_provided_arg_stypes = 0
# provided storage type argument names
provided_arg_stype_names = ctypes.POINTER(ctypes.c_char_p)()
provided_arg_stype_data = ctypes.POINTER(mx_uint)() # provided storage types
if stype_dict is not None:
provided_arg_stype_names = []
provided_arg_stype_data = []
for k, v in stype_dict.items():
if v in _STORAGE_TYPE_STR_TO_ID:
provided_arg_stype_names.append(k)
provided_arg_stype_data.append(_STORAGE_TYPE_STR_TO_ID[v])
num_provided_arg_stypes = mx_uint(len(provided_arg_stype_names))
provided_arg_stype_names = c_str_array(provided_arg_stype_names)
provided_arg_stype_data = c_array_buf(ctypes.c_int, array('i', provided_arg_stype_data))
provided_arg_shape_data = [] # shape data
# argument shape index in sdata,
# e.g. [sdata[indptr[0]], sdata[indptr[1]]) is the shape of the first arg
provided_arg_shape_idx = [0]
provided_arg_shape_names = [] # provided argument names
for k, v in kwargs.items():
# if k not in listed_arguments and k not in listed_aux_states:
# raise ValueError('arg name %s is not valid', k)
if isinstance(v, tuple):
provided_arg_shape_names.append(k)
provided_arg_shape_data.extend(v)
provided_arg_shape_idx.append(len(provided_arg_shape_data))
provided_req_type_list_len = 0
provided_grad_req_types = ctypes.POINTER(ctypes.c_char_p)()
provided_grad_req_names = ctypes.POINTER(ctypes.c_char_p)()
if grad_req is not None:
if isinstance(grad_req, string_types):
# use provided_req_type_list_len = 0 to indicate this situation
provided_req_type_list_len = 0
provided_grad_req_types = [grad_req]
elif isinstance(grad_req, list):
if len(grad_req) == 0:
raise RuntimeError('grad_req in simple_bind cannot be an empty list')
provided_grad_req_types = grad_req
provided_req_type_list_len = len(provided_grad_req_types)
elif isinstance(grad_req, dict):
if len(grad_req) == 0:
raise RuntimeError('grad_req in simple_bind cannot be an empty dict')
provided_grad_req_names = []
provided_grad_req_types = []
for k, v in grad_req.items():
provided_grad_req_names.append(k)
provided_grad_req_types.append(v)
provided_grad_req_names = c_str_array(provided_grad_req_names)
provided_req_type_list_len = len(provided_grad_req_types)
provided_grad_req_types = c_str_array(provided_grad_req_types)
num_ctx_map_keys = mx_uint(0)
ctx_map_keys = ctypes.POINTER(ctypes.c_char_p)()
ctx_map_dev_types = ctypes.POINTER(ctypes.c_int)()
ctx_map_dev_ids = ctypes.POINTER(ctypes.c_int)()
if group2ctx is not None:
ctx_map_keys = []
ctx_map_dev_types = []
ctx_map_dev_ids = []
for key, val in group2ctx.items():
ctx_map_keys.append(key)
ctx_map_dev_types.append(val.device_typeid)
ctx_map_dev_ids.append(val.device_id)
num_ctx_map_keys = mx_uint(len(ctx_map_keys))
ctx_map_keys = c_str_array(ctx_map_keys)
ctx_map_dev_types = c_array(ctypes.c_int, array('i', ctx_map_dev_types))
ctx_map_dev_ids = c_array(ctypes.c_int, array('i', ctx_map_dev_ids))
# prepare param names
shared_arg_name_list = []
if shared_arg_names is not None:
if not isinstance(shared_arg_names, list):
raise ValueError('shared_arg_names in simple_bind must be a list or None')
shared_arg_name_list = shared_arg_names
# prepare shared_buffer
if shared_buffer is None:
shared_buffer_len = ctypes.c_int(-1)
shared_buffer_names = ctypes.POINTER(ctypes.c_char_p)()
shared_buffer_handles = ctypes.POINTER(NDArrayHandle)()
else:
if not isinstance(shared_buffer, dict):
raise ValueError('shared_buffer in simple_bind must be dict or None')
buffer_names = shared_buffer.keys()
buffer_arrays = shared_buffer.values()
for v in buffer_arrays:
assert(v.stype == 'default'), \
"shared_buffer is expected to only contain NDArrays with default storage"
shared_buffer_names = c_str_array(buffer_names)
shared_buffer_len = ctypes.c_int(len(buffer_arrays))
shared_buffer_handles = c_handle_array(buffer_arrays)
updated_shared_buffer_names = ctypes.POINTER(ctypes.c_char_p)()
updated_shared_buffer_handles = ctypes.POINTER(NDArrayHandle)()
# prepare shared_exec_handle
shared_exec_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle()
# prepare current executor handle
exe_handle = ExecutorHandle()
# prepare current executor's in_args, arg_grads, and aux_states
num_in_args = ctypes.c_uint()
in_arg_handles = ctypes.POINTER(NDArrayHandle)()
arg_grad_handles = ctypes.POINTER(NDArrayHandle)()
num_aux_states = ctypes.c_uint()
aux_state_handles = ctypes.POINTER(NDArrayHandle)()
try:
check_call(_LIB.MXExecutorSimpleBind(self.handle,
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
num_ctx_map_keys,
ctx_map_keys,
ctx_map_dev_types,
ctx_map_dev_ids,
mx_uint(provided_req_type_list_len),
provided_grad_req_names,
provided_grad_req_types,
mx_uint(len(provided_arg_shape_names)),
c_str_array(provided_arg_shape_names),
c_array_buf(mx_uint,
array('I', provided_arg_shape_data)),
c_array_buf(mx_uint,
array('I', provided_arg_shape_idx)),
num_provided_arg_types,
provided_arg_type_names,
provided_arg_type_data,
num_provided_arg_stypes,
provided_arg_stype_names,
provided_arg_stype_data,
mx_uint(len(shared_arg_name_list)),
c_str_array(shared_arg_name_list),
ctypes.byref(shared_buffer_len),
shared_buffer_names,
shared_buffer_handles,
ctypes.byref(updated_shared_buffer_names),
ctypes.byref(updated_shared_buffer_handles),
ctypes.byref(num_in_args),
ctypes.byref(in_arg_handles),
ctypes.byref(arg_grad_handles),
ctypes.byref(num_aux_states),
ctypes.byref(aux_state_handles),
shared_exec_handle,
ctypes.byref(exe_handle)))
except MXNetError as e:
error_msg = "simple_bind error. Arguments:\n"
for k, v in kwargs.items():
error_msg += "%s: %s\n" % (k, v)
error_msg += "%s" % e
raise RuntimeError(error_msg)
# update shared_buffer
if shared_buffer is not None:
for i in range(shared_buffer_len.value):
k = py_str(updated_shared_buffer_names[i])
v = NDArray(NDArrayHandle(updated_shared_buffer_handles[i]))
shared_buffer[k] = v
# create in_args, arg_grads, and aux_states for the current executor
arg_arrays = [_ndarray_cls(NDArrayHandle(in_arg_handles[i]))
for i in range(num_in_args.value)]
grad_arrays = [_ndarray_cls(NDArrayHandle(arg_grad_handles[i]))
if arg_grad_handles[i] is not None
else None for i in range(num_in_args.value)]
aux_arrays = [_ndarray_cls(NDArrayHandle(aux_state_handles[i]))
for i in range(num_aux_states.value)]
executor = Executor(exe_handle, self, ctx, grad_req, group2ctx)
executor.arg_arrays = arg_arrays
executor.grad_arrays = grad_arrays
executor.aux_arrays = aux_arrays
return executor
def bind(self, ctx, args, args_grad=None, grad_req='write',
aux_states=None, group2ctx=None, shared_exec=None):
"""Binds the current symbol to an executor and returns it.
We first declare the computation and then bind to the data to run.
This function returns an executor which provides method `forward()` method for evaluation
and a `outputs()` method to get all the results.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a + b
<Symbol _plus1>
>>> ex = c.bind(ctx=mx.cpu(), args={'a' : mx.nd.ones([2,3]), 'b' : mx.nd.ones([2,3])})
>>> ex.forward()
[<NDArray 2x3 @cpu(0)>]
>>> ex.outputs[0].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
Parameters
----------
ctx : Context
The device context the generated executor to run on.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbol.
- If the input type is a list of `NDArray`, the order should be same as the order
of `list_arguments()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of arguments
to the corresponding `NDArray`.
- In either case, all the arguments must be provided.
args_grad : list of NDArray or dict of str to `NDArray`, optional
When specified, `args_grad` provides NDArrays to hold
the result of gradient value in backward.
- If the input type is a list of `NDArray`, the order should be same as the order
of `list_arguments()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of arguments
to the corresponding NDArray.
- When the type is a dict of str to `NDArray`, one only need to provide the dict
for required argument gradient.
Only the specified argument gradient will be calculated.
grad_req : {'write', 'add', 'null'}, or list of str or dict of str to str, optional
To specify how we should update the gradient to the `args_grad`.
- 'write' means everytime gradient is write to specified `args_grad` `NDArray`.
- 'add' means everytime gradient is add to the specified NDArray.
- 'null' means no action is taken, the gradient may not be calculated.
aux_states : list of `NDArray`, or dict of str to `NDArray`, optional
Input auxiliary states to the symbol, only needed when the output of
`list_auxiliary_states()` is not empty.
- If the input type is a list of `NDArray`, the order should be same as the order
of `list_auxiliary_states()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of
`auxiliary_states` to the corresponding `NDArray`,
- In either case, all the auxiliary states need to be provided.
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
shared_exec : mx.executor.Executor
Executor to share memory with. This is intended for runtime reshaping, variable length
sequences, etc. The returned executor shares state with `shared_exec`, and should not be
used in parallel with it.
Returns
-------
executor : Executor
The generated executor
Notes
-----
Auxiliary states are the special states of symbols that do not correspond
to an argument, and do not have gradient but are still useful
for the specific operations. Common examples of auxiliary states include
the `moving_mean` and `moving_variance` states in `BatchNorm`.
Most operators do not have auxiliary states and in those cases,
this parameter can be safely ignored.
One can give up gradient by using a dict in `args_grad` and only specify
gradient they interested in.
"""
# pylint: disable=too-many-locals, too-many-branches
if not isinstance(ctx, Context):
raise TypeError("Context type error")
listed_arguments = self.list_arguments()
args_handle, args = self._get_ndarray_inputs('args', args, listed_arguments, False)
# setup args gradient
if args_grad is None:
args_grad_handle = c_array(NDArrayHandle, [None] * len(args))
else:
args_grad_handle, args_grad = self._get_ndarray_inputs(
'args_grad', args_grad, listed_arguments, True)
if aux_states is None:
aux_states = []
aux_args_handle, aux_states = self._get_ndarray_inputs(
'aux_states', aux_states, self.list_auxiliary_states(), False)
# setup requirements
if isinstance(grad_req, string_types):
if grad_req not in _GRAD_REQ_MAP:
raise ValueError('grad_req must be in %s' % str(_GRAD_REQ_MAP))
reqs_array = c_array_buf(mx_uint,
array('I', [_GRAD_REQ_MAP[grad_req]] * len(listed_arguments)))
elif isinstance(grad_req, list):
reqs_array = c_array_buf(mx_uint,
array('I', [_GRAD_REQ_MAP[item] for item in grad_req]))
elif isinstance(grad_req, dict):
req_array = []
for name in listed_arguments:
if name in grad_req:
req_array.append(_GRAD_REQ_MAP[grad_req[name]])
else:
req_array.append(0)
reqs_array = c_array_buf(mx_uint, array('I', req_array))
ctx_map_keys = []
ctx_map_dev_types = []
ctx_map_dev_ids = []
if group2ctx:
for key, val in group2ctx.items():
ctx_map_keys.append(key)
ctx_map_dev_types.append(val.device_typeid)
ctx_map_dev_ids.append(val.device_id)
handle = ExecutorHandle()
shared_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle()
check_call(_LIB.MXExecutorBindEX(self.handle,
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
mx_uint(len(ctx_map_keys)),
c_str_array(ctx_map_keys),
c_array_buf(ctypes.c_int, array('i', ctx_map_dev_types)),
c_array_buf(ctypes.c_int, array('i', ctx_map_dev_ids)),
mx_uint(len(args)),
args_handle,
args_grad_handle,
reqs_array,
mx_uint(len(aux_states)),
aux_args_handle,
shared_handle,
ctypes.byref(handle)))
executor = Executor(handle, self, ctx, grad_req, group2ctx)
executor.arg_arrays = args
executor.grad_arrays = args_grad
executor.aux_arrays = aux_states
return executor
def gradient(self, wrt):
"""Gets the autodiff of current symbol.
This function can only be used if current symbol is a loss function.
.. note:: This function is currently not implemented.
Parameters
----------
wrt : Array of String
keyword arguments of the symbol that the gradients are taken.
Returns
-------
grad : Symbol
A gradient Symbol with returns to be the corresponding gradients.
"""
handle = SymbolHandle()
c_wrt = c_str_array(wrt)
check_call(_LIB.MXSymbolGrad(self.handle,
mx_uint(len(wrt)),
c_wrt,
ctypes.byref(handle)))
return Symbol(handle)
# pylint: enable= no-member
def eval(self, ctx=None, **kwargs):
"""Evaluates a symbol given arguments.
The `eval` method combines a call to `bind` (which returns an executor)
with a call to `forward` (executor method).
For the common use case, where you might repeatedly evaluate with same arguments,
eval is slow.
In that case, you should call `bind` once and then repeatedly call forward.
This function allows simpler syntax for less cumbersome introspection.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a + b
>>> ex = c.eval(ctx = mx.cpu(), a = mx.nd.ones([2,3]), b = mx.nd.ones([2,3]))
>>> ex
[<NDArray 2x3 @cpu(0)>]
>>> ex[0].asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
Parameters
----------
ctx : Context
The device context the generated executor to run on.
kwargs : Keyword arguments of type `NDArray`
Input arguments to the symbol. All the arguments must be provided.
Returns
----------
result : a list of NDArrays corresponding to the values taken by each symbol when
evaluated on given args. When called on a single symbol (not a group),
the result will be a list with one element.
"""
if ctx is None:
ctx = current_context()
return self.bind(ctx, kwargs).forward()
def reshape(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reshape`.
The arguments are the same as for :py:func:`reshape`, with
this array as data.
"""
return op.reshape(self, *args, **kwargs)
def reshape_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reshape_like`.
The arguments are the same as for :py:func:`reshape_like`, with
this array as data.
"""
return op.reshape_like(self, *args, **kwargs)
def astype(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cast`.
The arguments are the same as for :py:func:`cast`, with
this array as data.
"""
return op.cast(self, *args, **kwargs)
def zeros_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`zeros_like`.
The arguments are the same as for :py:func:`zeros_like`, with
this array as data.
"""
return op.zeros_like(self, *args, **kwargs)
def ones_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ones_like`.
The arguments are the same as for :py:func:`ones_like`, with
this array as data.
"""
return op.ones_like(self, *args, **kwargs)
def broadcast_axes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_axes`.
The arguments are the same as for :py:func:`broadcast_axes`, with
this array as data.
"""
return op.broadcast_axes(self, *args, **kwargs)
def repeat(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`repeat`.
The arguments are the same as for :py:func:`repeat`, with
this array as data.
"""
return op.repeat(self, *args, **kwargs)
def pad(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pad`.
The arguments are the same as for :py:func:`pad`, with
this array as data.
"""
return op.pad(self, *args, **kwargs)
def swapaxes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`swapaxes`.
The arguments are the same as for :py:func:`swapaxes`, with
this array as data.
"""
return op.swapaxes(self, *args, **kwargs)
def split(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data.
"""
return op.split(self, *args, **kwargs)
def split_v2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split_v2`.
The arguments are the same as for :py:func:`split_v2`, with
this array as data.
"""
return split_v2(self, *args, **kwargs)
def slice(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice`.
The arguments are the same as for :py:func:`slice`, with
this array as data.
"""
return op.slice(self, *args, **kwargs)
def slice_axis(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_axis`.
The arguments are the same as for :py:func:`slice_axis`, with
this array as data.
"""
return op.slice_axis(self, *args, **kwargs)
def slice_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_like`.
The arguments are the same as for :py:func:`slice_like`, with
this array as data.
"""
return op.slice_like(self, *args, **kwargs)
def take(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`take`.
The arguments are the same as for :py:func:`take`, with
this array as data.
"""
return op.take(self, *args, **kwargs)
def one_hot(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`one_hot`.
The arguments are the same as for :py:func:`one_hot`, with
this array as data.
"""
return op.one_hot(self, *args, **kwargs)
def pick(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pick`.
The arguments are the same as for :py:func:`pick`, with
this array as data.
"""
return op.pick(self, *args, **kwargs)
def sort(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sort`.
The arguments are the same as for :py:func:`sort`, with
this array as data.
"""
return op.sort(self, *args, **kwargs)
def topk(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`topk`.
The arguments are the same as for :py:func:`topk`, with
this array as data.
"""
return op.topk(self, *args, **kwargs)
def argsort(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argsort`.
The arguments are the same as for :py:func:`argsort`, with
this array as data.
"""
return op.argsort(self, *args, **kwargs)
def argmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax`.
The arguments are the same as for :py:func:`argmax`, with
this array as data.
"""
return op.argmax(self, *args, **kwargs)
def argmax_channel(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax_channel`.
The arguments are the same as for :py:func:`argmax_channel`, with
this array as data.
"""
return op.argmax_channel(self, *args, **kwargs)
def argmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmin`.
The arguments are the same as for :py:func:`argmin`, with
this array as data.
"""
return op.argmin(self, *args, **kwargs)
def clip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`clip`.
The arguments are the same as for :py:func:`clip`, with
this array as data.
"""
return op.clip(self, *args, **kwargs)
def abs(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`abs`.
The arguments are the same as for :py:func:`abs`, with
this array as data.
"""
return op.abs(self, *args, **kwargs)
def sign(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sign`.
The arguments are the same as for :py:func:`sign`, with
this array as data.
"""
return op.sign(self, *args, **kwargs)
def flatten(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flatten`.
The arguments are the same as for :py:func:`flatten`, with
this array as data.
"""
return op.flatten(self, *args, **kwargs)
def shape_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`shape_array`.
The arguments are the same as for :py:func:`shape_op`, with
this array as data.
"""
return op.shape_array(self, *args, **kwargs)
def size_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`size_array`.
The arguments are the same as for :py:func:`size_array`, with
this array as data.
"""
return op.size_array(self, *args, **kwargs)
def expand_dims(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expand_dims`.
The arguments are the same as for :py:func:`expand_dims`, with
this array as data.
"""
return op.expand_dims(self, *args, **kwargs)
def broadcast_to(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_to`.
The arguments are the same as for :py:func:`broadcast_to`, with
this array as data.
"""
return op.broadcast_to(self, *args, **kwargs)
def broadcast_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_like`.
The arguments are the same as for :py:func:`broadcast_like`, with
this array as data.
"""
return op.broadcast_like(self, *args, **kwargs)
def tile(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tile`.
The arguments are the same as for :py:func:`tile`, with
this array as data.
"""
return op.tile(self, *args, **kwargs)
def transpose(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`transpose`.
The arguments are the same as for :py:func:`transpose`, with
this array as data.
"""
return op.transpose(self, *args, **kwargs)
def flip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flip`.
The arguments are the same as for :py:func:`flip`, with
this array as data.
"""
return op.flip(self, *args, **kwargs)
def depth_to_space(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`depth_to_space`.
The arguments are the same as for :py:func:`depth_to_space`, with
this array as data.
"""
return op.depth_to_space(self, *args, **kwargs)
def space_to_depth(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`space_to_depth`.
The arguments are the same as for :py:func:`space_to_depth`, with
this array as data.
"""
return op.space_to_depth(self, *args, **kwargs)
def diag(self, k=0, **kwargs):
"""Convenience fluent method for :py:func:`diag`.
The arguments are the same as for :py:func:`diag`, with
this array as data.
"""
return op.diag(self, k, **kwargs)
def sum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sum`.
The arguments are the same as for :py:func:`sum`, with
this array as data.
"""
return op.sum(self, *args, **kwargs)
def nansum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nansum`.
The arguments are the same as for :py:func:`nansum`, with
this array as data.
"""
return op.nansum(self, *args, **kwargs)
def prod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`prod`.
The arguments are the same as for :py:func:`prod`, with
this array as data.
"""
return op.prod(self, *args, **kwargs)
def nanprod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nanprod`.
The arguments are the same as for :py:func:`nanprod`, with
this array as data.
"""
return op.nanprod(self, *args, **kwargs)
def mean(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`mean`.
The arguments are the same as for :py:func:`mean`, with
this array as data.
"""
return op.mean(self, *args, **kwargs)
def max(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`max`.
The arguments are the same as for :py:func:`max`, with
this array as data.
"""
return op.max(self, *args, **kwargs)
def min(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`min`.
The arguments are the same as for :py:func:`min`, with
this array as data.
"""
return op.min(self, *args, **kwargs)
def norm(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`norm`.
The arguments are the same as for :py:func:`norm`, with
this array as data.
"""
return op.norm(self, *args, **kwargs)
def round(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`round`.
The arguments are the same as for :py:func:`round`, with
this array as data.
"""
return op.round(self, *args, **kwargs)
def rint(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rint`.
The arguments are the same as for :py:func:`rint`, with
this array as data.
"""
return op.rint(self, *args, **kwargs)
def fix(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`fix`.
The arguments are the same as for :py:func:`fix`, with
this array as data.
"""
return op.fix(self, *args, **kwargs)
def floor(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`floor`.
The arguments are the same as for :py:func:`floor`, with
this array as data.
"""
return op.floor(self, *args, **kwargs)
def ceil(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ceil`.
The arguments are the same as for :py:func:`ceil`, with
this array as data.
"""
return op.ceil(self, *args, **kwargs)
def trunc(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`trunc`.
The arguments are the same as for :py:func:`trunc`, with
this array as data.
"""
return op.trunc(self, *args, **kwargs)
def sin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sin`.
The arguments are the same as for :py:func:`sin`, with
this array as data.
"""
return op.sin(self, *args, **kwargs)
def cos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cos`.
The arguments are the same as for :py:func:`cos`, with
this array as data.
"""
return op.cos(self, *args, **kwargs)
def tan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tan`.
The arguments are the same as for :py:func:`tan`, with
this array as data.
"""
return op.tan(self, *args, **kwargs)
def arcsin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsin`.
The arguments are the same as for :py:func:`arcsin`, with
this array as data.
"""
return op.arcsin(self, *args, **kwargs)
def arccos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccos`.
The arguments are the same as for :py:func:`arccos`, with
this array as data.
"""
return op.arccos(self, *args, **kwargs)
def arctan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctan`.
The arguments are the same as for :py:func:`arctan`, with
this array as data.
"""
return op.arctan(self, *args, **kwargs)
def degrees(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`degrees`.
The arguments are the same as for :py:func:`degrees`, with
this array as data.
"""
return op.degrees(self, *args, **kwargs)
def radians(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`radians`.
The arguments are the same as for :py:func:`radians`, with
this array as data.
"""
return op.radians(self, *args, **kwargs)
def sinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sinh`.
The arguments are the same as for :py:func:`sinh`, with
this array as data.
"""
return op.sinh(self, *args, **kwargs)
def cosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cosh`.
The arguments are the same as for :py:func:`cosh`, with
this array as data.
"""
return op.cosh(self, *args, **kwargs)
def tanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tanh`.
The arguments are the same as for :py:func:`tanh`, with
this array as data.
"""
return op.tanh(self, *args, **kwargs)
def arcsinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsinh`.
The arguments are the same as for :py:func:`arcsinh`, with
this array as data.
"""
return op.arcsinh(self, *args, **kwargs)
def arccosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccosh`.
The arguments are the same as for :py:func:`arccosh`, with
this array as data.
"""
return op.arccosh(self, *args, **kwargs)
def arctanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctanh`.
The arguments are the same as for :py:func:`arctanh`, with
this array as data.
"""
return op.arctanh(self, *args, **kwargs)
def exp(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`exp`.
The arguments are the same as for :py:func:`exp`, with
this array as data.
"""
return op.exp(self, *args, **kwargs)
def expm1(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expm1`.
The arguments are the same as for :py:func:`expm1`, with
this array as data.
"""
return op.expm1(self, *args, **kwargs)
def log(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log`.
The arguments are the same as for :py:func:`log`, with
this array as data.
"""
return op.log(self, *args, **kwargs)
def log10(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log10`.
The arguments are the same as for :py:func:`log10`, with
this array as data.
"""
return op.log10(self, *args, **kwargs)
def log2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log2`.
The arguments are the same as for :py:func:`log2`, with
this array as data.
"""
return op.log2(self, *args, **kwargs)
def log1p(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log1p`.
The arguments are the same as for :py:func:`log1p`, with
this array as data.
"""
return op.log1p(self, *args, **kwargs)
def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
The arguments are the same as for :py:func:`sqrt`, with
this array as data.
"""
return op.sqrt(self, *args, **kwargs)
def rsqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rsqrt`.
The arguments are the same as for :py:func:`rsqrt`, with
this array as data.
"""
return op.rsqrt(self, *args, **kwargs)
def cbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cbrt`.
The arguments are the same as for :py:func:`cbrt`, with
this array as data.
"""
return op.cbrt(self, *args, **kwargs)
def rcbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rcbrt`.
The arguments are the same as for :py:func:`rcbrt`, with
this array as data.
"""
return op.rcbrt(self, *args, **kwargs)
def square(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`square`.
The arguments are the same as for :py:func:`square`, with
this array as data.
"""
return op.square(self, *args, **kwargs)
def reciprocal(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reciprocal`.
The arguments are the same as for :py:func:`reciprocal`, with
this array as data.
"""
return op.reciprocal(self, *args, **kwargs)
def relu(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`relu`.
The arguments are the same as for :py:func:`relu`, with
this array as data.
"""
return op.relu(self, *args, **kwargs)
def sigmoid(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sigmoid`.
The arguments are the same as for :py:func:`sigmoid`, with
this array as data.
"""
return op.sigmoid(self, *args, **kwargs)
def softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmax`.
The arguments are the same as for :py:func:`softmax`, with
this array as data.
"""
return op.softmax(self, *args, **kwargs)
def log_softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log_softmax`.
The arguments are the same as for :py:func:`log_softmax`, with
this array as data.
"""
return op.log_softmax(self, *args, **kwargs)
def softmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmin`.
The arguments are the same as for :py:func:`softmin`, with
this array as data.
"""
return op.softmin(self, *args, **kwargs)
def squeeze(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`squeeze`.
The arguments are the same as for :py:func:`squeeze`, with
this array as data.
"""
return op.squeeze(self, *args, **kwargs)
def get_backend_symbol(self, backend):
"""Return symbol for target backend.
Parameters
----------
backend : str
The backend names.
Returns
-------
out : Symbol
The created Symbol for target backend.
"""
out = SymbolHandle()
check_call(_LIB.MXGenBackendSubgraph(self.handle, c_str(backend), ctypes.byref(out)))
return Symbol(out)
def wait_to_read(self):
raise NotImplementedForSymbol(self.wait_to_read, None)
def asnumpy(self):
raise NotImplementedForSymbol(self.asnumpy, None)
def asscalar(self):
raise NotImplementedForSymbol(self.asscalar, None)
def copy(self):
raise NotImplementedForSymbol(self.copy, None)
def as_in_context(self):
raise NotImplementedForSymbol(self.as_in_context, None)
def detach(self):
raise NotImplementedForSymbol(self.detach, None)
def backward(self):
raise NotImplementedForSymbol(self.backward, None)
def var(name, attr=None, shape=None, lr_mult=None, wd_mult=None, dtype=None,
init=None, stype=None, **kwargs):
"""Creates a symbolic variable with specified name.
Example
-------
>>> data = mx.sym.Variable('data', attr={'a': 'b'})
>>> data
<Symbol data>
>>> csr_data = mx.sym.Variable('csr_data', stype='csr')
>>> csr_data
<Symbol csr_data>
>>> row_sparse_weight = mx.sym.Variable('weight', stype='row_sparse')
>>> row_sparse_weight
<Symbol weight>
Parameters
----------
name : str
Variable name.
attr : Dict of strings
Additional attributes to set on the variable. Format {string : string}.
shape : tuple
The shape of a variable. If specified, this will be used during the shape inference.
If one has specified a different shape for this variable using
a keyword argument when calling shape inference, this shape information will be ignored.
lr_mult : float
The learning rate multiplier for input variable.
wd_mult : float
Weight decay multiplier for input variable.
dtype : str or numpy.dtype
The dtype for input variable. If not specified, this value will be inferred.
init : initializer (mxnet.init.*)
Initializer for this variable to (optionally) override the default initializer.
stype : str
The storage type of the variable, such as 'row_sparse', 'csr', 'default', etc
kwargs : Additional attribute variables
Additional attributes must start and end with double underscores.
Returns
-------
variable : Symbol
A symbol corresponding to an input to the computation graph.
"""
if not isinstance(name, string_types):
raise TypeError('Expect a string for variable `name`')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateVariable(c_str(name), ctypes.byref(handle)))
ret = Symbol(handle)
if not hasattr(AttrScope._current, "value"):
AttrScope._current.value = AttrScope()
attr = AttrScope._current.value.get(attr)
attr = {} if attr is None else attr
if shape is not None:
attr['__shape__'] = str(shape)
if lr_mult is not None:
attr['__lr_mult__'] = str(lr_mult)
if wd_mult is not None:
attr['__wd_mult__'] = str(wd_mult)
if dtype is not None:
attr['__dtype__'] = str(_DTYPE_NP_TO_MX[_numpy.dtype(dtype).type])
if init is not None:
if not isinstance(init, string_types):
init = init.dumps()
attr['__init__'] = init
if stype is not None:
attr['__storage_type__'] = str(_STORAGE_TYPE_STR_TO_ID[stype])
for k, v in kwargs.items():
if k.startswith('__') and k.endswith('__'):
attr[k] = str(v)
else:
raise ValueError('Attribute name=%s is not supported.'
' Additional attributes must start and end with double underscores,'
' e.g, __yourattr__' % k)
ret._set_attr(**attr)
return ret
# for back compatibility
Variable = var
def Group(symbols):
"""Creates a symbol that contains a collection of other symbols, grouped together.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> mx.sym.Group([a,b])
<Symbol Grouped>
Parameters
----------
symbols : list
List of symbols to be grouped.
Returns
-------
sym : Symbol
A group symbol.
"""
if not symbols or any(not isinstance(sym, Symbol) for sym in symbols):
raise TypeError('Expected a list of symbols as input')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateGroup(
mx_uint(len(symbols)),
c_handle_array(symbols), ctypes.byref(handle)))
return Symbol(handle)
def load(fname):
"""Loads symbol from a JSON file.
You can also use pickle to do the job if you only work on python.
The advantage of load/save is the file is language agnostic.
This means the file saved using save can be loaded by other language binding of mxnet.
You also get the benefit being able to directly load/save from cloud storage(S3, HDFS).
Parameters
----------
fname : str
The name of the file, examples:
- `s3://my-bucket/path/my-s3-symbol`
- `hdfs://my-bucket/path/my-hdfs-symbol`
- `/path-to/my-local-symbol`
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.save : Used to save symbol into file.
"""
if not isinstance(fname, string_types):
raise TypeError('fname need to be string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))
return Symbol(handle)
def load_json(json_str):
"""Loads symbol from json string.
Parameters
----------
json_str : str
A JSON string.
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.tojson : Used to save symbol into json string.
"""
if not isinstance(json_str, string_types):
raise TypeError('fname required to be string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
return Symbol(handle)
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def pow(base, exp):
"""Returns element-wise result of base element raised to powers from exp element.
Both inputs can be Symbol or scalar number.
Broadcasting is not supported. Use `broadcast_pow` instead.
Parameters
---------
base : Symbol or scalar
The base symbol
exp : Symbol or scalar
The exponent symbol
Returns
-------
Symbol or scalar
The bases in x raised to the exponents in y.
Examples
--------
>>> mx.sym.pow(2, 3)
8
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.pow(x, 2)
>>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()
array([ 1., 4.], dtype=float32)
>>> z = mx.sym.pow(3, y)
>>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 27.], dtype=float32)
>>> z = mx.sym.pow(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 64.], dtype=float32)
"""
if isinstance(base, Symbol) and isinstance(exp, Symbol):
return _internal._Power(base, exp)
if isinstance(base, Symbol) and isinstance(exp, Number):
return _internal._PowerScalar(base, scalar=exp)
if isinstance(base, Number) and isinstance(exp, Symbol):
return _internal._RPowerScalar(exp, scalar=base)
if isinstance(base, Number) and isinstance(exp, Number):
return base**exp
else:
raise TypeError('types (%s, %s) not supported' % (str(type(base)), str(type(exp))))
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def maximum(left, right):
"""Returns element-wise maximum of the input elements.
Both inputs can be Symbol or scalar number. Broadcasting is not supported.
Parameters
---------
left : Symbol or scalar
First symbol to be compared.
right : Symbol or scalar
Second symbol to be compared.
Returns
-------
Symbol or scalar
The element-wise maximum of the input symbols.
Examples
--------
>>> mx.sym.maximum(2, 3.5)
3.5
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.maximum(x, 4)
>>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()
array([ 4., 5., 4., 10.], dtype=float32)
>>> z = mx.sym.maximum(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
array([ 10., 4.], dtype=float32)
"""
if isinstance(left, Symbol) and isinstance(right, Symbol):
return _internal._Maximum(left, right)
if isinstance(left, Symbol) and isinstance(right, Number):
return _internal._MaximumScalar(left, scalar=right)
if isinstance(left, Number) and isinstance(right, Symbol):
return _internal._MaximumScalar(right, scalar=left)
if isinstance(left, Number) and isinstance(right, Number):
return left if left > right else right
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def minimum(left, right):
"""Returns element-wise minimum of the input elements.
Both inputs can be Symbol or scalar number. Broadcasting is not supported.
Parameters
---------
left : Symbol or scalar
First symbol to be compared.
right : Symbol or scalar
Second symbol to be compared.
Returns
-------
Symbol or scalar
The element-wise minimum of the input symbols.
Examples
--------
>>> mx.sym.minimum(2, 3.5)
2
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.minimum(x, 4)
>>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()
array([ 3., 4., 2., 4.], dtype=float32)
>>> z = mx.sym.minimum(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
array([ 3., 2.], dtype=float32)
"""
if isinstance(left, Symbol) and isinstance(right, Symbol):
return _internal._Minimum(left, right)
if isinstance(left, Symbol) and isinstance(right, Number):
return _internal._MinimumScalar(left, scalar=right)
if isinstance(left, Number) and isinstance(right, Symbol):
return _internal._MinimumScalar(right, scalar=left)
if isinstance(left, Number) and isinstance(right, Number):
return left if left < right else right
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
# pylint: disable=no-member
# pylint: disable=redefined-builtin
def hypot(left, right):
"""Given the "legs" of a right triangle, returns its hypotenuse.
Equivalent to :math:`\\sqrt(left^2 + right^2)`, element-wise.
Both inputs can be Symbol or scalar number. Broadcasting is not supported.
Parameters
---------
left : Symbol or scalar
First leg of the triangle(s).
right : Symbol or scalar
Second leg of the triangle(s).
Returns
-------
Symbol or scalar
The hypotenuse of the triangle(s)
Examples
--------
>>> mx.sym.hypot(3, 4)
5.0
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.hypot(x, 4)
>>> z.eval(x=mx.nd.array([3,5,2]))[0].asnumpy()
array([ 5., 6.40312433, 4.47213602], dtype=float32)
>>> z = mx.sym.hypot(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
array([ 10.44030666, 4.47213602], dtype=float32)
"""
if isinstance(left, Symbol) and isinstance(right, Symbol):
return _internal._Hypot(left, right)
if isinstance(left, Symbol) and isinstance(right, Number):
return _internal._HypotScalar(left, scalar=right)
if isinstance(left, Number) and isinstance(right, Symbol):
return _internal._HypotScalar(right, scalar=left)
if isinstance(left, Number) and isinstance(right, Number):
return _numpy.hypot(left, right)
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
def eye(N, M=0, k=0, dtype=None, **kwargs):
"""Returns a new symbol of 2-D shpae, filled with ones on the diagonal and zeros elsewhere.
Parameters
----------
N: int
Number of rows in the output.
M: int, optional
Number of columns in the output. If 0, defaults to N.
k: int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol.
"""
if dtype is None:
dtype = _numpy.float32
return _internal._eye(N, M, k, dtype=dtype, **kwargs)
def zeros(shape, dtype=None, **kwargs):
"""Returns a new symbol of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol.
"""
if dtype is None:
dtype = _numpy.float32
return _internal._zeros(shape=shape, dtype=dtype, **kwargs)
def ones(shape, dtype=None, **kwargs):
"""Returns a new symbol of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol
"""
if dtype is None:
dtype = _numpy.float32
return _internal._ones(shape=shape, dtype=dtype, **kwargs)
def full(shape, val, dtype=None, **kwargs):
"""Returns a new array of given shape and type, filled with the given value `val`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array.
val : scalar
Fill value.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol
"""
if dtype is None:
dtype = _numpy.float32
return _internal._full(shape=shape, dtype=dtype, value=float(val), **kwargs)
# pylint: disable=redefined-outer-name
def arange(start, stop=None, step=1.0, repeat=1, infer_range=False, name=None, dtype=None):
"""Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns a `Symbol`.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default start value is 0.
stop : number
End of interval. The interval does not include this value.
step : number, optional
Spacing between values.
repeat : int, optional
"The repeating time of all elements.
E.g repeat=3, the element a will be repeated three times --> a, a, a.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol
"""
if dtype is None:
dtype = _numpy.float32
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
infer_range=infer_range, name=name, dtype=dtype)
def histogram(a, bins=10, range=None, **kwargs):
"""Compute the histogram of the input data.
Parameters
----------
a : NDArray
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars
If bins is an int, it defines the number of equal-width bins in the
given range (10, by default). If bins is a sequence, it defines the bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
range : (float, float), required if bins is an integer
The lower and upper range of the bins. If not provided, range is simply (a.min(), a.max()).
Values outside the range are ignored. The first element of the range must be less than or
equal to the second. range affects the automatic bin computation as well, the range will
be equally divided by the number of bins.
Returns
-------
out : Symbol
The created Symbol
"""
if isinstance(bins, Symbol):
return _internal._histogram(data=a, bins=bins, **kwargs)
elif isinstance(bins, integer_types):
if range is None:
raise ValueError("null range is not supported in symbol mode")
return _internal._histogram(data=a, bin_cnt=bins, range=range, **kwargs)
raise ValueError("bins argument should be either an integer or an NDArray")
def split_v2(ary, indices_or_sections, axis=0, squeeze_axis=False):
"""Split an array into multiple sub-arrays.
Parameters
----------
ary : NDArray
Array to be divided into sub-arrays.
indices_or_sections : int or tuple of ints
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
squeeze_axis: boolean, optional
Whether to squeeze the axis of sub-arrays or not, only useful when size
of the sub-arrays are 1 on the `axis`. Default is False.
Returns
-------
out : Symbol
The created Symbol
"""
indices = []
sections = 0
if isinstance(indices_or_sections, int):
sections = indices_or_sections
elif isinstance(indices_or_sections, tuple):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must either int or tuple of ints')
return _internal._split_v2(ary, indices, axis, squeeze_axis, sections)
_set_symbol_class(Symbol)
| {
"content_hash": "bb2d0db68e24ccffd848192e2ea7ff34",
"timestamp": "",
"source": "github",
"line_count": 3088,
"max_line_length": 100,
"avg_line_length": 36.21794041450777,
"alnum_prop": 0.553902415035631,
"repo_name": "dmlc/mxnet",
"id": "0c0a0a1e3c885a5f6dff1b773be049d2da574503",
"size": "112778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mxnet/symbol/symbol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10619"
},
{
"name": "C",
"bytes": "85580"
},
{
"name": "C++",
"bytes": "3227650"
},
{
"name": "CMake",
"bytes": "48546"
},
{
"name": "Cuda",
"bytes": "567360"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "16368"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "40096"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "615878"
},
{
"name": "Perl 6",
"bytes": "21993"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "3164782"
},
{
"name": "R",
"bytes": "284084"
},
{
"name": "Scala",
"bytes": "862528"
},
{
"name": "Shell",
"bytes": "110890"
}
],
"symlink_target": ""
} |
import unittest
from autothreadharness.harness_case import HarnessCase
class Commissioner_8_1_1(HarnessCase):
role = HarnessCase.ROLE_COMMISSIONER
case = '8 1 1'
golden_devices_required = 1
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "612b1eefe0fe8945195f7389f264f11a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 19.25,
"alnum_prop": 0.6688311688311688,
"repo_name": "turon/openthread",
"id": "32b09f7407b5f03a3ae0a5805d6b53dacb1f75a5",
"size": "1890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/harness-automation/cases/commissioner_8_1_1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "50"
},
{
"name": "C",
"bytes": "1034514"
},
{
"name": "C++",
"bytes": "4480499"
},
{
"name": "Dockerfile",
"bytes": "6306"
},
{
"name": "M4",
"bytes": "36666"
},
{
"name": "Makefile",
"bytes": "138336"
},
{
"name": "Python",
"bytes": "2160064"
},
{
"name": "Shell",
"bytes": "73650"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, -1].values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelEncoder_X_1 = LabelEncoder()
X[:,1] = labelEncoder_X_1.fit_transform(X[:,1])
labelEncoder_X_2 = LabelEncoder()
X[:,2] = labelEncoder_X_2.fit_transform(X[:,2])
onehotencoder = OneHotEncoder(categorical_features=[1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# ANN does require Feature Scaling compulsorily
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting classifier to the Training set
# Part 2 ANN
# Importing libraries
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising ANN
classifier = Sequential()
#1st layer
classifier.add(Dense(output_dim = 6, init ='uniform', activation= 'relu', input_dim= 11 ))
# 2nd layer
classifier.add(Dense(output_dim = 6, init ='uniform', activation= 'relu' ))
#3rd layer
classifier.add(Dense(output_dim = 1, init ='uniform', activation= 'sigmoid'))
# if more than 2 categories activation fun = softmax output_dim = no of categories
#compile the classifier
# loss 2= = binary _crossentropy, >2 categorical_crossentropy
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics =['accuracy'])
classifier.fit(X_train, y_train, batch_size= 10, nb_epoch = 100)
y_pred = classifier.predict(X_test)
# Part 3 Evaluating the model
y_pred = (y_pred > 0.5)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
| {
"content_hash": "209ad5a4653a4e08772bab4cde13b97a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 92,
"avg_line_length": 29.681159420289855,
"alnum_prop": 0.74169921875,
"repo_name": "jigargandhi/UdemyMachineLearning",
"id": "05e33ce45e6065acf2a353f5f55c045a3fd39334",
"size": "2074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Machine Learning A-Z Template Folder/Part 8 - Deep Learning/Section 39 - Artificial Neural Networks (ANN)/j_ann.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157569"
},
{
"name": "R",
"bytes": "74375"
}
],
"symlink_target": ""
} |
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AnonymEmail',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.CharField(max_length=64)),
('date', models.DateTimeField()),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'ordering': ['-date'],
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='anonymemail',
unique_together=set([('user', 'email')]),
),
]
| {
"content_hash": "dd7168a749d14decc061c902de8d7a82",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 114,
"avg_line_length": 31.724137931034484,
"alnum_prop": 0.5489130434782609,
"repo_name": "cjlee112/socraticqs2",
"id": "1255022853acecbb3ebc15bcb0cbb55950fa3bd6",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/psa/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "138226"
},
{
"name": "Dockerfile",
"bytes": "3865"
},
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "467395"
},
{
"name": "JavaScript",
"bytes": "234788"
},
{
"name": "Makefile",
"bytes": "4696"
},
{
"name": "Python",
"bytes": "1785754"
},
{
"name": "Shell",
"bytes": "2889"
}
],
"symlink_target": ""
} |
Subsets and Splits