code | repo_name | path | language | license | size
stringlengths 2 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int32 2 to 1.05M
---|---|---|---|---|---
#!/usr/bin/env python3
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'):
print("Not a pull request. Exiting now.")
exit(0)
import subprocess
import gh_post
SIZELIMIT = 10000
TOKEN_ESPRESSO_CI = 'style.patch'
# Delete obsolete posts
gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI)
MESSAGE = '''Your pull request does not meet our code formatting \
rules. {header}, please do one of the following:
- You can download a patch with my suggested changes \
[here]({url}/artifacts/raw/style.patch), inspect it and make \
changes manually.
- You can directly apply it to your repository by running \
`curl {url}/artifacts/raw/style.patch | git apply -`.
- You can run `maintainer/CI/fix_style.sh` to automatically fix your coding \
style. This is the same command that I have executed to generate the patch \
above, but it requires certain tools to be installed on your computer.
You can run `gitlab-runner exec docker style` afterwards to check if your \
changes worked out properly.
Please note that there are often multiple ways to correctly format code. \
As I am just a robot, I sometimes fail to identify the most aesthetically \
pleasing way. So please look over my suggested changes and adapt them \
where the style does not make sense.\
'''
# If the working directory is not clean, post a new comment
if subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"]) != 0:
patch = subprocess.check_output(['git', '--no-pager', 'diff'])
if len(patch) <= SIZELIMIT:
comment = 'Specifically, I suggest you make the following changes:'
comment += '\n```diff\n'
comment += patch.decode('utf-8').replace('`', r'\`').strip()
comment += '\n```\n'
comment += 'To apply these changes'
else:
comment = 'To fix this'
comment = MESSAGE.format(header=comment, url=gh_post.CI_JOB_URL)
if patch:
assert TOKEN_ESPRESSO_CI in comment
gh_post.post_message(comment)
| espressomd/espresso | maintainer/gh_post_style_patch.py | Python | gpl-3.0 | 2,704 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
      - Create or delete the Auto Scaling Group.
required: false
choices: ['present', 'absent']
default: present
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: true
min_size:
description:
- Minimum number of instances in group, if unspecified then the current group value will be used.
required: false
max_size:
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
required: false
placement_group:
description:
- Physical location of your cluster placement group created in Amazon EC2.
required: false
version_added: "2.3"
default: None
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
required: false
replace_all_instances:
description:
      - In a rolling fashion, replace all instances that have an old launch configuration with instances from the current launch configuration.
required: false
version_added: "1.8"
default: False
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration.
required: false
version_added: "1.8"
default: None
lc_check:
description:
- Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
required: false
version_added: "1.8"
default: True
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
    default: 300 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
required: false
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
      - How long to wait for instances to become viable when replaced. Used in conjunction with the replace_instances option.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: yes
required: False
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
      - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained.
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
notification_topic:
description:
- A SNS topic ARN to send auto scaling notifications to.
default: None
required: false
version_added: "2.2"
notification_types:
description:
- A list of auto scaling events to trigger notifications on.
default: ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR']
required: false
version_added: "2.2"
suspend_processes:
description:
- A list of scaling processes to suspend.
required: False
default: []
choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer']
version_added: "2.3"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Basic configuration
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
Below is an example of how to assign a new launch config to an ASG and terminate old instances.
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".
This could also be considered a rolling deploy of a pre-baked AMI.
If this is a newly created group, the instances will not be replaced since all instances
will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
To only replace a couple of instances instead of all of them, supply a list
to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
'''
import time
import logging as log
import traceback
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
log.getLogger('boto').setLevel(log.CRITICAL)
#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
'health_check_period', 'health_check_type', 'launch_config_name',
'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
'termination_policies', 'vpc_zone_identifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
def enforce_required_arguments(module):
''' As many arguments are not required for autoscale group deletion
they cannot be mandatory arguments for the module, so we enforce
them here '''
missing_args = []
for arg in ('min_size', 'max_size', 'launch_config_name'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
def get_properties(autoscaling_group):
properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES)
# Ugly hack to make this JSON-serializable. We take a list of boto Tag
# objects and replace them with a dict-representation. Needed because the
# tags are included in ansible's return value (which is jsonified)
if 'tags' in properties and isinstance(properties['tags'], list):
serializable_tags = {}
for tag in properties['tags']:
serializable_tags[tag.key] = [tag.value, tag.propagate_at_launch]
properties['tags'] = serializable_tags
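    # Illustrative note (example values, not from the original module): a boto tag such as
    # Tag(key='env', value='prod', propagate_at_launch=True) is flattened by the loop above
    # into the JSON-friendly entry {'env': ['prod', True]}.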
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
properties['unhealthy_instances'] = 0
properties['pending_instances'] = 0
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
instance_facts = {}
if autoscaling_group.instances:
properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
for i in autoscaling_group.instances:
instance_facts[i.instance_id] = {'health_status': i.health_status,
'lifecycle_state': i.lifecycle_state,
'launch_config_name': i.launch_config_name }
if i.health_status == 'Healthy' and i.lifecycle_state == 'InService':
properties['viable_instances'] += 1
if i.health_status == 'Healthy':
properties['healthy_instances'] += 1
else:
properties['unhealthy_instances'] += 1
if i.lifecycle_state == 'InService':
properties['in_service_instances'] += 1
if i.lifecycle_state == 'Terminating':
properties['terminating_instances'] += 1
if i.lifecycle_state == 'Pending':
properties['pending_instances'] += 1
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.load_balancers
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
return properties
def elb_dreg(asg_connection, module, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
as_group = asg_connection.get_all_groups(names=[group_name])[0]
wait_timeout = module.params.get('wait_timeout')
props = get_properties(as_group)
count = 1
if as_group.load_balancers and as_group.health_check_type == 'ELB':
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
else:
return
for lb in as_group.load_balancers:
elb_connection.deregister_instances(lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
count = 0
for lb in as_group.load_balancers:
lb_instances = elb_connection.describe_instance_health(lb)
for i in lb_instances:
if i.instance_id == instance_id and i.state == "InService":
count += 1
log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, module, group_name):
healthy_instances = set()
as_group = asg_connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(instance)
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("ELB instance status:")
for lb in as_group.load_balancers:
# we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet shown up in the ELB
try:
lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
except boto.exception.BotoServerError as e:
if e.error_code == 'InvalidInstance':
return None
module.fail_json(msg=str(e))
for i in lb_instances:
if i.state == "InService":
healthy_instances.add(i.instance_id)
log.debug("{0}: {1}".format(i.instance_id, i.state))
return len(healthy_instances)
def wait_for_elb(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
    # status, so as to avoid the health_check_grace_period that is granted to ASG instances
as_group = asg_connection.get_all_groups(names=[group_name])[0]
if as_group.load_balancers and as_group.health_check_type == 'ELB':
log.debug("Waiting for ELB to consider instances healthy.")
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
wait_timeout = time.time() + wait_timeout
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
while healthy_instances < as_group.min_size and wait_timeout > time.time():
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
def suspend_processes(as_group, module):
suspend_processes = set(module.params.get('suspend_processes'))
try:
suspended_processes = set([p.process_name for p in as_group.suspended_processes])
except AttributeError:
# New ASG being created, no suspended_processes defined yet
suspended_processes = set()
if suspend_processes == suspended_processes:
return False
resume_processes = list(suspended_processes - suspend_processes)
if resume_processes:
as_group.resume_processes(resume_processes)
if suspend_processes:
as_group.suspend_processes(list(suspend_processes))
return True
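# Rough sketch of the diffing above (hypothetical values, not part of the original module):
# if the ASG currently suspends {'Launch'} and the playbook asks only for {'Terminate'},
# suspend_processes() resumes ['Launch'], suspends ['Terminate'] and returns True; when the
# two sets already match, it returns False without touching AWS.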
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
availability_zones = module.params['availability_zones']
launch_config_name = module.params.get('launch_config_name')
min_size = module.params['min_size']
max_size = module.params['max_size']
placement_group = module.params.get('placement_group')
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
health_check_period = module.params.get('health_check_period')
health_check_type = module.params.get('health_check_type')
default_cooldown = module.params.get('default_cooldown')
wait_for_instances = module.params.get('wait_for_instances')
as_groups = connection.get_all_groups(names=[group_name])
wait_timeout = module.params.get('wait_timeout')
termination_policies = module.params.get('termination_policies')
notification_topic = module.params.get('notification_topic')
notification_types = module.params.get('notification_types')
if not vpc_zone_identifier and not availability_zones:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
asg_tags = []
for tag in set_tags:
for k,v in tag.items():
if k !='propagate_at_launch':
asg_tags.append(Tag(key=k,
value=v,
propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
resource_id=group_name))
if not as_groups:
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
enforce_required_arguments(module)
launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
if len(launch_configs) == 0:
module.fail_json(msg="No launch config found with name %s" % launch_config_name)
ag = AutoScalingGroup(
group_name=group_name,
load_balancers=load_balancers,
availability_zones=availability_zones,
launch_config=launch_configs[0],
min_size=min_size,
max_size=max_size,
placement_group=placement_group,
desired_capacity=desired_capacity,
vpc_zone_identifier=vpc_zone_identifier,
connection=connection,
tags=asg_tags,
health_check_period=health_check_period,
health_check_type=health_check_type,
default_cooldown=default_cooldown,
termination_policies=termination_policies)
try:
connection.create_auto_scaling_group(ag)
suspend_processes(ag, module)
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
if notification_topic:
ag.put_notification_configuration(notification_topic, notification_types)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
changed = True
return(changed, asg_properties)
except BotoServerError as e:
module.fail_json(msg="Failed to create Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e))
else:
as_group = as_groups[0]
changed = False
if suspend_processes(as_group, module):
changed = True
for attr in ASG_ATTRIBUTES:
if module.params.get(attr, None) is not None:
module_attr = module.params.get(attr)
if attr == 'vpc_zone_identifier':
module_attr = ','.join(module_attr)
group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list
# sorted differently
if attr != 'termination_policies':
try:
module_attr.sort()
except:
pass
try:
group_attr.sort()
except:
pass
if group_attr != module_attr:
changed = True
setattr(as_group, attr, module_attr)
if len(set_tags) > 0:
have_tags = {}
want_tags = {}
for tag in asg_tags:
want_tags[tag.key] = [tag.value, tag.propagate_at_launch]
dead_tags = []
for tag in as_group.tags:
have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
if tag.key not in want_tags:
changed = True
dead_tags.append(tag)
if dead_tags != []:
connection.delete_tags(dead_tags)
if have_tags != want_tags:
changed = True
connection.create_or_update_tags(asg_tags)
# handle loadbalancers separately because None != []
load_balancers = module.params.get('load_balancers') or []
if load_balancers and as_group.load_balancers != load_balancers:
changed = True
as_group.load_balancers = module.params.get('load_balancers')
if changed:
try:
as_group.update()
except BotoServerError as e:
module.fail_json(msg="Failed to update Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e))
if notification_topic:
try:
as_group.put_notification_configuration(notification_topic, notification_types)
except BotoServerError as e:
module.fail_json(msg="Failed to update Autoscaling Group notifications: %s" % str(e), exception=traceback.format_exc(e))
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
try:
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
except BotoServerError as e:
module.fail_json(msg="Failed to read existing Autoscaling Groups: %s" % str(e), exception=traceback.format_exc(e))
return(changed, asg_properties)
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
notification_topic = module.params.get('notification_topic')
    groups = connection.get_all_groups(names=[group_name])
    if groups:
        group = groups[0]
        # drop any notification configuration attached to the group before tearing it down
        if notification_topic:
            group.delete_notification_configuration(notification_topic)
group.max_size = 0
group.min_size = 0
group.desired_capacity = 0
group.update()
instances = True
while instances:
tmp_groups = connection.get_all_groups(names=[group_name])
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)
group.delete()
while len(connection.get_all_groups(names=[group_name])):
time.sleep(5)
changed=True
return changed
else:
changed=False
return changed
def get_chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
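# Quick illustration (made-up instance ids): get_chunks(['i-1', 'i-2', 'i-3', 'i-4', 'i-5'], 2)
# yields ['i-1', 'i-2'], then ['i-3', 'i-4'], then ['i-5'] -- the batches that replace() walks.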
def update_size(group, max_size, min_size, dc):
log.debug("setting ASG sizes")
log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size ))
group.max_size = max_size
group.min_size = min_size
group.desired_capacity = dc
group.update()
def replace(connection, module):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
lc_check = module.params.get('lc_check')
replace_instances = module.params.get('replace_instances')
as_group = connection.get_all_groups(names=[group_name])[0]
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
#check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group.min_size
if max_size is None:
max_size = as_group.max_size
if desired_capacity is None:
desired_capacity = as_group.desired_capacity
# check to see if instances are replaceable if checking launch configs
new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
num_new_inst_needed = desired_capacity - len(new_instances)
if lc_check:
if num_new_inst_needed == 0 and old_instances:
log.debug("No new instances needed, but old instances are present. Removing old instances")
terminate_batch(connection, module, old_instances, instances, True)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
changed = True
return(changed, props)
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
batch_size = num_new_inst_needed
if not old_instances:
changed = False
return(changed, props)
# set temporary settings and wait for them to be reached
# This should get overwritten if the number of instances left is less than the batch size.
as_group = connection.get_all_groups(names=[group_name])[0]
update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
log.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
# break out of this loop if we have enough new instances
break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
wait_for_term_inst(connection, module, term_instances)
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
if break_early:
log.debug("breaking loop")
break
update_size(as_group, max_size, min_size, desired_capacity)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
log.debug("Rolling update complete.")
changed=True
return(changed, asg_properties)
def get_instances_by_lc(props, lc_check, initial_instances):
new_instances = []
old_instances = []
# old instances are those that have the old launch config
if lc_check:
for i in props['instances']:
if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
new_instances.append(i)
else:
old_instances.append(i)
else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
for i in props['instances']:
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
return new_instances, old_instances
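# Sketch of the classification above (illustrative names): with lc_check=True and the group's
# current launch config called 'my_new_lc', every instance whose launch_config_name is
# 'my_new_lc' lands in new_instances; everything else is "old" and a candidate for replacement.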
def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
instances_to_terminate = []
instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances'])
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
if lc_check:
for i in instances:
if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate
def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
batch_size = module.params.get('replace_batch_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
group_name = module.params.get('name')
wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check')
decrement_capacity = False
break_loop = False
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
desired_size = as_group.min_size
new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
num_new_inst_needed = desired_capacity - len(new_instances)
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
log.debug("new instances needed: {0}".format(num_new_inst_needed))
log.debug("new instances: {0}".format(new_instances))
log.debug("old instances: {0}".format(old_instances))
log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group.min_size != min_size:
as_group.min_size = min_size
as_group.update()
log.debug("Updating minimum size back to original of {0}".format(min_size))
        # if there are some leftover old instances, but we are already at capacity with new ones
# we don't want to decrement capacity
if leftovers:
decrement_capacity = False
break_loop = True
instances_to_terminate = old_instances
desired_size = min_size
log.debug("No new instances needed")
    if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
log.debug("{0} new instances needed".format(num_new_inst_needed))
log.debug("decrementing capacity: {0}".format(decrement_capacity))
for instance_id in instances_to_terminate:
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
return break_loop, desired_size, instances_to_terminate
def wait_for_term_inst(connection, module, term_instances):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
lc_check = module.params.get('lc_check')
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
log.debug("waiting for instances to terminate")
count = 0
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instance_facts = props['instance_facts']
instances = ( i for i in instance_facts if i in term_instances)
for i in instances:
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
time.sleep(10)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
log.debug("Reached {0}: {1}".format(prop, desired_size))
return props
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
min_size=dict(type='int'),
max_size=dict(type='int'),
placement_group=dict(type='str'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default'),
notification_topic=dict(type='str', default=None),
notification_types=dict(type='list', default=[
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
]),
suspend_processes=dict(type='list', default=[])
),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['replace_all_instances', 'replace_instances']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
changed = create_changed = replace_changed = False
if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module)
elif state == 'absent':
changed = delete_autoscaling_group(connection, module)
module.exit_json( changed = changed )
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
if create_changed or replace_changed:
changed = True
module.exit_json( changed = changed, **asg_properties )
if __name__ == '__main__':
main()
| zahodi/ansible | lib/ansible/modules/cloud/amazon/ec2_asg.py | Python | gpl-3.0 | 38,091 |
import lxml.html as l
import requests
def key_char_parse(char_id):
url = 'https://vndb.org/c' + str(char_id)
page = requests.get(url)
root = l.fromstring(page.text)
name = root.cssselect('.mainbox h1')[0].text
kanji_name = root.cssselect('.mainbox h2.alttitle')[0].text
img = 'https:' + root.cssselect('.mainbox .charimg img')[0].attrib['src']
gender = root.cssselect('.chardetails table thead tr td abbr')[0].attrib['title']
try:
bloodtype = root.cssselect('.chardetails table thead tr td span')[0].text
except IndexError:
bloodtype = None
table = root.cssselect('.chardetails table')[0]
for row in table:
if row.tag == 'tr':
if len(row) == 2:
try:
key = row[0][0].text
except IndexError:
key = row[0].text
value = None
try:
if row[1][0].tag == 'a':
value = row[1][0].text
else:
value = []
for span in row[1]:
if 'charspoil_1' in span.classes:
tag = 'minor spoiler'
elif 'charspoil_2' in span.classes:
tag = 'spoiler'
elif 'sexual' in span.classes:
tag = 'sexual trait'
else:
tag = None
value.append({'value': span[1].text, 'tag': tag})
except IndexError:
value = row[1].text
if key == 'Visual novels':
value = []
for span in row[1]:
if span.tag == 'span':
value.append(span.text + span[0].text)
desc = root.cssselect('.chardetails table td.chardesc')[0][1].text
character = {
'URL': url,
'Name': name,
'Name_J': kanji_name,
'Image': img,
'Gender': gender,
'Blood_Type': bloodtype,
'Description': desc
}
return character
| aurora-pro/apex-sigma | sigma/plugins/fun/vn_char.py | Python | gpl-3.0 | 2,210 |
# -*- coding: utf-8 -*-
"""
pythoner.net
Copyright (C) 2013 PYTHONER.ORG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.contrib import admin
from models import *
class ProfileAdmin(admin.ModelAdmin):
list_display = ('screen_name','city','introduction')
admin.site.register(UserProfile,ProfileAdmin) | yohn89/pythoner.net | pythoner/accounts/admin.py | Python | gpl-3.0 | 915 |
from django.conf.urls.defaults import *
from indivo.views import *
from indivo.lib.utils import MethodDispatcher
urlpatterns = patterns('',
(r'^$', MethodDispatcher({
'DELETE' : carenet_delete})),
(r'^/rename$', MethodDispatcher({
'POST' : carenet_rename})),
(r'^/record$', MethodDispatcher({'GET':carenet_record})),
# Manage documents
(r'^/documents/', include('indivo.urls.carenet_documents')),
# Manage accounts
(r'^/accounts/$',
MethodDispatcher({
'GET' : carenet_account_list,
'POST' : carenet_account_create
})),
(r'^/accounts/(?P<account_id>[^/]+)$',
MethodDispatcher({ 'DELETE' : carenet_account_delete })),
# Manage apps
(r'^/apps/$',
MethodDispatcher({ 'GET' : carenet_apps_list})),
(r'^/apps/(?P<pha_email>[^/]+)$',
MethodDispatcher({ 'PUT' : carenet_apps_create,
'DELETE': carenet_apps_delete})),
# Permissions Calls
(r'^/accounts/(?P<account_id>[^/]+)/permissions$',
MethodDispatcher({ 'GET' : carenet_account_permissions })),
(r'^/apps/(?P<pha_email>[^/]+)/permissions$',
MethodDispatcher({ 'GET' : carenet_app_permissions })),
# Reporting Calls
(r'^/reports/minimal/procedures/$',
MethodDispatcher({'GET':carenet_procedure_list})),
(r'^/reports/minimal/simple-clinical-notes/$',
MethodDispatcher({'GET':carenet_simple_clinical_notes_list})),
(r'^/reports/minimal/equipment/$',
MethodDispatcher({'GET':carenet_equipment_list})),
(r'^/reports/minimal/measurements/(?P<lab_code>[^/]+)/$',
MethodDispatcher({'GET':carenet_measurement_list})),
(r'^/reports/(?P<data_model>[^/]+)/$',
MethodDispatcher({'GET':carenet_generic_list})),
# Demographics
(r'^/demographics$', MethodDispatcher({'GET': read_demographics_carenet})),
)
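# Reading the table above (illustrative, not part of the original file): a GET on .../record
# dispatches to carenet_record, a DELETE on the carenet root dispatches to carenet_delete,
# and a POST to .../rename dispatches to carenet_rename.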
| sayan801/indivo_server | indivo/urls/carenet.py | Python | gpl-3.0 | 1,997 |
# coding: utf-8
import json
import logging
import dateutil.parser
import pytz
from werkzeug import urls
from odoo import api, fields, models, _
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.addons.payment_paypal.controllers.main import PaypalController
from odoo.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class AcquirerPaypal(models.Model):
_inherit = 'payment.acquirer'
provider = fields.Selection(selection_add=[('paypal', 'Paypal')])
paypal_email_account = fields.Char('Paypal Email ID', required_if_provider='paypal', groups='base.group_user')
paypal_seller_account = fields.Char(
'Paypal Merchant ID', groups='base.group_user',
help='The Merchant ID is used to ensure communications coming from Paypal are valid and secured.')
paypal_use_ipn = fields.Boolean('Use IPN', default=True, help='Paypal Instant Payment Notification', groups='base.group_user')
paypal_pdt_token = fields.Char(string='Paypal PDT Token', help='Payment Data Transfer allows you to receive notification of successful payments as they are made.', groups='base.group_user')
# Server 2 server
paypal_api_enabled = fields.Boolean('Use Rest API', default=False)
paypal_api_username = fields.Char('Rest API Username', groups='base.group_user')
paypal_api_password = fields.Char('Rest API Password', groups='base.group_user')
paypal_api_access_token = fields.Char('Access Token', groups='base.group_user')
paypal_api_access_token_validity = fields.Datetime('Access Token Validity', groups='base.group_user')
# Default paypal fees
fees_dom_fixed = fields.Float(default=0.35)
fees_dom_var = fields.Float(default=3.4)
fees_int_fixed = fields.Float(default=0.35)
fees_int_var = fields.Float(default=3.9)
def _get_feature_support(self):
"""Get advanced feature support by provider.
        Each provider should add its technical name in the corresponding
key for the following features:
* fees: support payment fees computations
* authorize: support authorizing payment (separates
authorization and capture)
* tokenize: support saving payment data in a payment.tokenize
object
"""
res = super(AcquirerPaypal, self)._get_feature_support()
res['fees'].append('paypal')
return res
@api.model
def _get_paypal_urls(self, environment):
""" Paypal URLS """
if environment == 'prod':
return {
'paypal_form_url': 'https://www.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.paypal.com/v1/oauth2/token',
}
else:
return {
'paypal_form_url': 'https://www.sandbox.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.sandbox.paypal.com/v1/oauth2/token',
}
@api.multi
def paypal_compute_fees(self, amount, currency_id, country_id):
""" Compute paypal fees.
:param float amount: the amount to pay
:param integer country_id: an ID of a res.country, or None. This is
the customer's country, to be compared to
the acquirer company country.
:return float fees: computed fees
"""
if not self.fees_active:
return 0.0
country = self.env['res.country'].browse(country_id)
if country and self.company_id.country_id.id == country.id:
percentage = self.fees_dom_var
fixed = self.fees_dom_fixed
else:
percentage = self.fees_int_var
fixed = self.fees_int_fixed
fees = (percentage / 100.0 * amount + fixed) / (1 - percentage / 100.0)
return fees
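    # Worked example (the amount is made up; the rates are the defaults declared above): for a
    # domestic payment of 100.00 with fees_dom_var=3.4 and fees_dom_fixed=0.35, the formula gives
    # (0.034 * 100.00 + 0.35) / (1 - 0.034) = 3.75 / 0.966 ~= 3.88, so roughly 103.88 is charged in total.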
@api.multi
def paypal_form_generate_values(self, values):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
paypal_tx_values = dict(values)
paypal_tx_values.update({
'cmd': '_xclick',
'business': self.paypal_email_account,
'item_name': '%s: %s' % (self.company_id.name, values['reference']),
'item_number': values['reference'],
'amount': values['amount'],
'currency_code': values['currency'] and values['currency'].name or '',
'address1': values.get('partner_address'),
'city': values.get('partner_city'),
'country': values.get('partner_country') and values.get('partner_country').code or '',
'state': values.get('partner_state') and (values.get('partner_state').code or values.get('partner_state').name) or '',
'email': values.get('partner_email'),
'zip_code': values.get('partner_zip'),
'first_name': values.get('partner_first_name'),
'last_name': values.get('partner_last_name'),
'paypal_return': urls.url_join(base_url, PaypalController._return_url),
'notify_url': urls.url_join(base_url, PaypalController._notify_url),
'cancel_return': urls.url_join(base_url, PaypalController._cancel_url),
'handling': '%.2f' % paypal_tx_values.pop('fees', 0.0) if self.fees_active else False,
'custom': json.dumps({'return_url': '%s' % paypal_tx_values.pop('return_url')}) if paypal_tx_values.get('return_url') else False,
})
return paypal_tx_values
@api.multi
def paypal_get_form_action_url(self):
return self._get_paypal_urls(self.environment)['paypal_form_url']
class TxPaypal(models.Model):
_inherit = 'payment.transaction'
paypal_txn_type = fields.Char('Transaction type')
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
@api.model
def _paypal_form_get_tx_from_data(self, data):
reference, txn_id = data.get('item_number'), data.get('txn_id')
if not reference or not txn_id:
error_msg = _('Paypal: received data with missing reference (%s) or txn_id (%s)') % (reference, txn_id)
_logger.info(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use txn_id ?
txs = self.env['payment.transaction'].search([('reference', '=', reference)])
if not txs or len(txs) > 1:
error_msg = 'Paypal: received data for reference %s' % (reference)
if not txs:
error_msg += '; no order found'
else:
                error_msg += '; multiple orders found'
_logger.info(error_msg)
raise ValidationError(error_msg)
return txs[0]
@api.multi
def _paypal_form_get_invalid_parameters(self, data):
invalid_parameters = []
_logger.info('Received a notification from Paypal with IPN version %s', data.get('notify_version'))
if data.get('test_ipn'):
_logger.warning(
'Received a notification from Paypal using sandbox'
),
        # TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
if self.acquirer_reference and data.get('txn_id') != self.acquirer_reference:
invalid_parameters.append(('txn_id', data.get('txn_id'), self.acquirer_reference))
        # check what was bought
if float_compare(float(data.get('mc_gross', '0.0')), (self.amount + self.fees), 2) != 0:
invalid_parameters.append(('mc_gross', data.get('mc_gross'), '%.2f' % self.amount)) # mc_gross is amount + fees
if data.get('mc_currency') != self.currency_id.name:
invalid_parameters.append(('mc_currency', data.get('mc_currency'), self.currency_id.name))
if 'handling_amount' in data and float_compare(float(data.get('handling_amount')), self.fees, 2) != 0:
invalid_parameters.append(('handling_amount', data.get('handling_amount'), self.fees))
# check buyer
if self.payment_token_id and data.get('payer_id') != self.payment_token_id.acquirer_ref:
invalid_parameters.append(('payer_id', data.get('payer_id'), self.payment_token_id.acquirer_ref))
# check seller
if data.get('receiver_id') and self.acquirer_id.paypal_seller_account and data['receiver_id'] != self.acquirer_id.paypal_seller_account:
invalid_parameters.append(('receiver_id', data.get('receiver_id'), self.acquirer_id.paypal_seller_account))
if not data.get('receiver_id') or not self.acquirer_id.paypal_seller_account:
# Check receiver_email only if receiver_id was not checked.
            # In Paypal, it is possible to configure a receiver_email that differs from the business email (the login email).
            # In Odoo, there is only one field for the Paypal email: the business email. It is not possible to set a receiver_email
            # different from the business email. Therefore, if you want such a configuration in Paypal, you must fill in
            # the Merchant ID in the Paypal payment acquirer in Odoo, so the check is performed on that field instead of the receiver_email.
# At least one of the two checks must be done, to avoid fraudsters.
if data.get('receiver_email') != self.acquirer_id.paypal_email_account:
invalid_parameters.append(('receiver_email', data.get('receiver_email'), self.acquirer_id.paypal_email_account))
return invalid_parameters
@api.multi
def _paypal_form_validate(self, data):
status = data.get('payment_status')
res = {
'acquirer_reference': data.get('txn_id'),
'paypal_txn_type': data.get('payment_type'),
}
if status in ['Completed', 'Processed']:
_logger.info('Validated Paypal payment for tx %s: set as done' % (self.reference))
try:
# dateutil and pytz don't recognize abbreviations PDT/PST
tzinfos = {
'PST': -8 * 3600,
'PDT': -7 * 3600,
}
date = dateutil.parser.parse(data.get('payment_date'), tzinfos=tzinfos).astimezone(pytz.utc)
except:
date = fields.Datetime.now()
res.update(date=date)
self._set_transaction_done()
return self.write(res)
elif status in ['Pending', 'Expired']:
_logger.info('Received notification for Paypal payment %s: set as pending' % (self.reference))
res.update(state_message=data.get('pending_reason', ''))
self._set_transaction_pending()
return self.write(res)
else:
error = 'Received unrecognized status for Paypal payment %s: %s, set as error' % (self.reference, status)
_logger.info(error)
res.update(state_message=error)
self._set_transaction_cancel()
return self.write(res)
| t3dev/odoo | addons/payment_paypal/models/payment.py | Python | gpl-3.0 | 11,083 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# experiments.py
# Copyright (C) 2015 Fracpete (pythonwekawrapper at gmail dot com)
import unittest
import weka.core.jvm as jvm
import weka.core.converters as converters
import weka.classifiers as classifiers
import weka.experiments as experiments
import weka.plot.experiments as plot
import wekatests.tests.weka_test as weka_test
class TestExperiments(weka_test.WekaTest):
def test_plot_experiment(self):
"""
Tests the plot_experiment method.
"""
datasets = [self.datafile("bolts.arff"), self.datafile("bodyfat.arff"), self.datafile("autoPrice.arff")]
cls = [
classifiers.Classifier("weka.classifiers.trees.REPTree"),
classifiers.Classifier("weka.classifiers.functions.LinearRegression"),
classifiers.Classifier("weka.classifiers.functions.SMOreg"),
]
outfile = self.tempfile("results-rs.arff")
exp = experiments.SimpleRandomSplitExperiment(
classification=False,
runs=10,
percentage=66.6,
preserve_order=False,
datasets=datasets,
classifiers=cls,
result=outfile)
exp.setup()
exp.run()
# evaluate
loader = converters.loader_for_file(outfile)
data = loader.load_file(outfile)
matrix = experiments.ResultMatrix("weka.experiment.ResultMatrixPlainText")
tester = experiments.Tester("weka.experiment.PairedCorrectedTTester")
tester.resultmatrix = matrix
comparison_col = data.attribute_by_name("Correlation_coefficient").index
tester.instances = data
tester.header(comparison_col)
tester.multi_resultset_full(0, comparison_col)
# plot
plot.plot_experiment(matrix, title="Random split (w/ StdDev)", measure="Correlation coefficient", show_stdev=True, wait=False)
plot.plot_experiment(matrix, title="Random split", measure="Correlation coefficient", wait=False)
def suite():
"""
Returns the test suite.
:return: the test suite
:rtype: unittest.TestSuite
"""
return unittest.TestLoader().loadTestsFromTestCase(TestExperiments)
if __name__ == '__main__':
jvm.start()
unittest.TextTestRunner().run(suite())
jvm.stop()
| nvoron23/python-weka-wrapper | tests/wekatests/plottests/experiments.py | Python | gpl-3.0 | 2,900 |
# lint-amnesty, pylint: disable=missing-module-docstring
from unittest.mock import patch
from django.test import TestCase
from common.djangoapps.track.backends.mongodb import MongoBackend
class TestMongoBackend(TestCase): # lint-amnesty, pylint: disable=missing-class-docstring
def setUp(self):
super().setUp()
self.mongo_patcher = patch('common.djangoapps.track.backends.mongodb.MongoClient')
self.mongo_patcher.start()
self.addCleanup(self.mongo_patcher.stop)
self.backend = MongoBackend()
def test_mongo_backend(self):
events = [{'test': 1}, {'test': 2}]
self.backend.send(events[0])
self.backend.send(events[1])
# Check if we inserted events into the database
calls = self.backend.collection.insert.mock_calls
assert len(calls) == 2
# Unpack the arguments and check if the events were used
# as the first argument to collection.insert
def first_argument(call):
_, args, _ = call
return args[0]
assert events[0] == first_argument(calls[0])
assert events[1] == first_argument(calls[1])
| eduNEXT/edx-platform | common/djangoapps/track/backends/tests/test_mongodb.py | Python | agpl-3.0 | 1,162 |
"""Capa's specialized use of codejail.safe_exec."""
import hashlib
from codejail.safe_exec import SafeExecException, json_safe
from codejail.safe_exec import not_safe_exec as codejail_not_safe_exec
from codejail.safe_exec import safe_exec as codejail_safe_exec
from edx_django_utils.monitoring import function_trace
import six
from six import text_type
from . import lazymod
from .remote_exec import is_codejail_rest_service_enabled, get_remote_exec
# Establish the Python environment for Capa.
# Capa assumes float-friendly division always.
# The name "random" is a properly-seeded stand-in for the random module.
CODE_PROLOG = """\
from __future__ import absolute_import, division
import os
os.environ["OPENBLAS_NUM_THREADS"] = "1" # See TNL-6456
import random2 as random_module
import sys
from six.moves import xrange
random = random_module.Random(%r)
random.Random = random_module.Random
sys.modules['random'] = random
"""
ASSUMED_IMPORTS = [
("numpy", "numpy"),
("math", "math"),
("scipy", "scipy"),
("calc", "calc"),
("eia", "eia"),
("chemcalc", "chem.chemcalc"),
("chemtools", "chem.chemtools"),
("miller", "chem.miller"),
("draganddrop", "verifiers.draganddrop"),
]
# We'll need the code from lazymod.py for use in safe_exec, so read it now.
lazymod_py_file = lazymod.__file__
if lazymod_py_file.endswith("c"):
lazymod_py_file = lazymod_py_file[:-1]
with open(lazymod_py_file) as f:
lazymod_py = f.read()
LAZY_IMPORTS = [lazymod_py]
for name, modname in ASSUMED_IMPORTS:
LAZY_IMPORTS.append("{} = LazyModule('{}')\n".format(name, modname))
LAZY_IMPORTS = "".join(LAZY_IMPORTS)
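# The resulting LAZY_IMPORTS string is lazymod.py's source followed by one assignment per
# assumed import, e.g. "numpy = LazyModule('numpy')" and "chemcalc = LazyModule('chem.chemcalc')".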
def update_hash(hasher, obj):
"""
Update a `hashlib` hasher with a nested object.
To properly cache nested structures, we need to compute a hash from the
entire structure, canonicalizing at every level.
`hasher`'s `.update()` method is called a number of times, touching all of
`obj` in the process. Only primitive JSON-safe types are supported.
"""
hasher.update(six.b(str(type(obj))))
if isinstance(obj, (tuple, list)):
for e in obj:
update_hash(hasher, e)
elif isinstance(obj, dict):
for k in sorted(obj):
update_hash(hasher, k)
update_hash(hasher, obj[k])
else:
hasher.update(six.b(repr(obj)))
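# Small usage sketch (not part of the original module): because dict keys are visited in sorted
# order, two equivalent nested structures hash identically, e.g.
#   h1, h2 = hashlib.md5(), hashlib.md5()
#   update_hash(h1, {'a': 1, 'b': [2, 3]})
#   update_hash(h2, {'b': [2, 3], 'a': 1})
#   assert h1.hexdigest() == h2.hexdigest()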
@function_trace('safe_exec')
def safe_exec(
code,
globals_dict,
random_seed=None,
python_path=None,
extra_files=None,
cache=None,
limit_overrides_context=None,
slug=None,
unsafely=False,
):
"""
Execute python code safely.
`code` is the Python code to execute. It has access to the globals in `globals_dict`,
and any changes it makes to those globals are visible in `globals_dict` when this
function returns.
    `random_seed` will be used to seed the `random` module available to the code.
`python_path` is a list of filenames or directories to add to the Python
path before execution. If the name is not in `extra_files`, then it will
also be copied into the sandbox.
`extra_files` is a list of (filename, contents) pairs. These files are
created in the sandbox.
`cache` is an object with .get(key) and .set(key, value) methods. It will be used
to cache the execution, taking into account the code, the values of the globals,
and the random seed.
`limit_overrides_context` is an optional string to be used as a key on
the `settings.CODE_JAIL['limit_overrides']` dictionary in order to apply
context-specific overrides to the codejail execution limits.
If `limit_overrides_context` is omitted or not present in limit_overrides,
    then use the default limits specified in settings.CODE_JAIL['limits'].
`slug` is an arbitrary string, a description that's meaningful to the
caller, that will be used in log messages.
If `unsafely` is true, then the code will actually be executed without sandboxing.
"""
# Check the cache for a previous result.
if cache:
safe_globals = json_safe(globals_dict)
md5er = hashlib.md5()
md5er.update(repr(code).encode('utf-8'))
update_hash(md5er, safe_globals)
key = "safe_exec.%r.%s" % (random_seed, md5er.hexdigest())
cached = cache.get(key)
if cached is not None:
# We have a cached result. The result is a pair: the exception
# message, if any, else None; and the resulting globals dictionary.
emsg, cleaned_results = cached
globals_dict.update(cleaned_results)
if emsg:
raise SafeExecException(emsg)
return
# Create the complete code we'll run.
code_prolog = CODE_PROLOG % random_seed
if is_codejail_rest_service_enabled():
data = {
"code": code_prolog + LAZY_IMPORTS + code,
"globals_dict": globals_dict,
"python_path": python_path,
"limit_overrides_context": limit_overrides_context,
"slug": slug,
"unsafely": unsafely,
"extra_files": extra_files,
}
emsg, exception = get_remote_exec(data)
else:
# Decide which code executor to use.
if unsafely:
exec_fn = codejail_not_safe_exec
else:
exec_fn = codejail_safe_exec
# Run the code! Results are side effects in globals_dict.
try:
exec_fn(
code_prolog + LAZY_IMPORTS + code,
globals_dict,
python_path=python_path,
extra_files=extra_files,
limit_overrides_context=limit_overrides_context,
slug=slug,
)
except SafeExecException as e:
            # Save the SafeExecException so it can be re-raised after caching.
exception = e
emsg = text_type(e)
else:
emsg = None
# Put the result back in the cache. This is complicated by the fact that
# the globals dict might not be entirely serializable.
if cache:
cleaned_results = json_safe(globals_dict)
cache.set(key, (emsg, cleaned_results))
# If an exception happened, raise it now.
if emsg:
raise exception
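# Minimal usage sketch (added for clarity, not part of the original module).
# Globals flow in and out through `globals_dict`; the names below are purely
# illustrative and assume a properly configured codejail/sandbox environment.
#
#   g = {"n": 3}
#   safe_exec("result = n * n", g, random_seed=1, slug="square-example")
#   assert g["result"] == 9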
| eduNEXT/edx-platform | common/lib/capa/capa/safe_exec/safe_exec.py | Python | agpl-3.0 | 6,279 |
from ddt import ddt, data
from django.core.urlresolvers import reverse
from django.test import TestCase
import mock
from analyticsclient.exceptions import NotFoundError
from courses.tests import SwitchMixin
from courses.tests.test_views import ViewTestMixin, DEMO_COURSE_ID, DEPRECATED_DEMO_COURSE_ID
from courses.tests.utils import convert_list_of_dicts_to_csv, get_mock_api_enrollment_geography_data, \
get_mock_api_enrollment_data, get_mock_api_course_activity, get_mock_api_enrollment_age_data, \
get_mock_api_enrollment_education_data, get_mock_api_enrollment_gender_data
@ddt
# pylint: disable=abstract-method
class CourseCSVTestMixin(ViewTestMixin):
client = None
column_headings = None
base_file_name = None
def assertIsValidCSV(self, course_id, csv_data):
response = self.client.get(self.path(course_id=course_id))
# Check content type
self.assertResponseContentType(response, 'text/csv')
# Check filename
csv_prefix = u'edX-DemoX-Demo_2014' if course_id == DEMO_COURSE_ID else u'edX-DemoX-Demo_Course'
filename = '{0}--{1}.csv'.format(csv_prefix, self.base_file_name)
self.assertResponseFilename(response, filename)
# Check data
self.assertEqual(response.content, csv_data)
def assertResponseContentType(self, response, content_type):
self.assertEqual(response['Content-Type'], content_type)
def assertResponseFilename(self, response, filename):
self.assertEqual(response['Content-Disposition'], 'attachment; filename="{0}"'.format(filename))
def _test_csv(self, course_id, csv_data):
with mock.patch(self.api_method, return_value=csv_data):
self.assertIsValidCSV(course_id, csv_data)
@data(DEMO_COURSE_ID, DEPRECATED_DEMO_COURSE_ID)
def test_response_no_data(self, course_id):
# Create an "empty" CSV that only has headers
csv_data = convert_list_of_dicts_to_csv([], self.column_headings)
self._test_csv(course_id, csv_data)
@data(DEMO_COURSE_ID, DEPRECATED_DEMO_COURSE_ID)
def test_response(self, course_id):
csv_data = self.get_mock_data(course_id)
csv_data = convert_list_of_dicts_to_csv(csv_data)
self._test_csv(course_id, csv_data)
def test_404(self):
course_id = 'fakeOrg/soFake/Fake_Course'
self.grant_permission(self.user, course_id)
path = reverse(self.viewname, kwargs={'course_id': course_id})
with mock.patch(self.api_method, side_effect=NotFoundError):
response = self.client.get(path, follow=True)
self.assertEqual(response.status_code, 404)
class CourseEnrollmentByCountryCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment_geography'
column_headings = ['count', 'country', 'course_id', 'date']
base_file_name = 'enrollment-location'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_geography_data(course_id)
class CourseEnrollmentCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment'
column_headings = ['count', 'course_id', 'date']
base_file_name = 'enrollment'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_data(course_id)
class CourseEnrollmentModeCSVViewTests(SwitchMixin, CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment'
column_headings = ['count', 'course_id', 'date', 'audit', 'honor', 'professional', 'verified']
base_file_name = 'enrollment'
api_method = 'analyticsclient.course.Course.enrollment'
@classmethod
def setUpClass(cls):
cls.toggle_switch('display_verified_enrollment', True)
def get_mock_data(self, course_id):
return get_mock_api_enrollment_data(course_id)
class CourseEnrollmentDemographicsByAgeCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment_demographics_age'
column_headings = ['birth_year', 'count', 'course_id', 'created', 'date']
base_file_name = 'enrollment-by-birth-year'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_age_data(course_id)
class CourseEnrollmentDemographicsByEducationCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment_demographics_education'
column_headings = ['count', 'course_id', 'created', 'date', 'education_level.name', 'education_level.short_name']
base_file_name = 'enrollment-by-education'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_education_data(course_id)
class CourseEnrollmentByDemographicsGenderCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment_demographics_gender'
column_headings = ['count', 'course_id', 'created', 'date', 'gender']
base_file_name = 'enrollment-by-gender'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_gender_data(course_id)
class CourseEngagementActivityTrendCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:engagement_activity_trend'
column_headings = ['any', 'attempted_problem', 'course_id', 'interval_end', 'interval_start',
'played_video', 'posted_forum']
base_file_name = 'engagement-activity'
api_method = 'analyticsclient.course.Course.activity'
def get_mock_data(self, course_id):
return get_mock_api_course_activity(course_id)
| open-craft/edx-analytics-dashboard | analytics_dashboard/courses/tests/test_views/test_csv.py | Python | agpl-3.0 | 5,748 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from odoo import api, fields, models
class ProductProduct(models.Model):
_inherit = "product.product"
date_from = fields.Date(compute='_compute_product_margin_fields_values', string='Margin Date From')
date_to = fields.Date(compute='_compute_product_margin_fields_values', string='Margin Date To')
invoice_state = fields.Selection(compute='_compute_product_margin_fields_values',
selection=[
('paid', 'Paid'),
('open_paid', 'Open and Paid'),
('draft_open_paid', 'Draft, Open and Paid')
], string='Invoice State', readonly=True)
sale_avg_price = fields.Float(compute='_compute_product_margin_fields_values', string='Avg. Sale Unit Price',
help="Avg. Price in Customer Invoices.")
purchase_avg_price = fields.Float(compute='_compute_product_margin_fields_values', string='Avg. Purchase Unit Price',
help="Avg. Price in Vendor Bills ")
sale_num_invoiced = fields.Float(compute='_compute_product_margin_fields_values', string='# Invoiced in Sale',
help="Sum of Quantity in Customer Invoices")
purchase_num_invoiced = fields.Float(compute='_compute_product_margin_fields_values', string='# Invoiced in Purchase',
help="Sum of Quantity in Vendor Bills")
sales_gap = fields.Float(compute='_compute_product_margin_fields_values', string='Sales Gap',
help="Expected Sale - Turn Over")
purchase_gap = fields.Float(compute='_compute_product_margin_fields_values', string='Purchase Gap',
help="Normal Cost - Total Cost")
turnover = fields.Float(compute='_compute_product_margin_fields_values', string='Turnover',
help="Sum of Multiplication of Invoice price and quantity of Customer Invoices")
total_cost = fields.Float(compute='_compute_product_margin_fields_values', string='Total Cost',
help="Sum of Multiplication of Invoice price and quantity of Vendor Bills ")
sale_expected = fields.Float(compute='_compute_product_margin_fields_values', string='Expected Sale',
help="Sum of Multiplication of Sale Catalog price and quantity of Customer Invoices")
normal_cost = fields.Float(compute='_compute_product_margin_fields_values', string='Normal Cost',
help="Sum of Multiplication of Cost price and quantity of Vendor Bills")
total_margin = fields.Float(compute='_compute_product_margin_fields_values', string='Total Margin',
help="Turnover - Standard price")
expected_margin = fields.Float(compute='_compute_product_margin_fields_values', string='Expected Margin',
help="Expected Sale - Normal Cost")
total_margin_rate = fields.Float(compute='_compute_product_margin_fields_values', string='Total Margin Rate(%)',
help="Total margin * 100 / Turnover")
expected_margin_rate = fields.Float(compute='_compute_product_margin_fields_values', string='Expected Margin (%)',
help="Expected margin * 100 / Expected Sale")
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
"""
        Inherit read_group to calculate the sum of the non-stored fields, as this is no longer done automatically through the XML.
"""
res = super(ProductProduct, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
fields_list = ['turnover', 'sale_avg_price', 'sale_purchase_price', 'sale_num_invoiced', 'purchase_num_invoiced',
'sales_gap', 'purchase_gap', 'total_cost', 'sale_expected', 'normal_cost', 'total_margin',
'expected_margin', 'total_margin_rate', 'expected_margin_rate']
if any(x in fields for x in fields_list):
            # First determine, for every product, which result line its values must be added to
re_ind = 0
prod_re = {}
tot_products = self.browse([])
for re in res:
if re.get('__domain'):
products = self.search(re['__domain'])
tot_products |= products
for prod in products:
prod_re[prod.id] = re_ind
re_ind += 1
            res_val = tot_products._compute_product_margin_fields_values(field_names=[x for x in fields if x in fields_list])
for key in res_val:
for l in res_val[key]:
re = res[prod_re[key]]
if re.get(l):
re[l] += res_val[key][l]
else:
re[l] = res_val[key][l]
return res
def _compute_product_margin_fields_values(self, field_names=None):
res = {}
if field_names is None:
field_names = []
for val in self:
res[val.id] = {}
date_from = self.env.context.get('date_from', time.strftime('%Y-01-01'))
date_to = self.env.context.get('date_to', time.strftime('%Y-12-31'))
invoice_state = self.env.context.get('invoice_state', 'open_paid')
res[val.id]['date_from'] = date_from
res[val.id]['date_to'] = date_to
res[val.id]['invoice_state'] = invoice_state
states = ()
payment_states = ()
if invoice_state == 'paid':
states = ('posted',)
payment_states = ('paid',)
elif invoice_state == 'open_paid':
states = ('posted',)
payment_states = ('not_paid', 'paid')
elif invoice_state == 'draft_open_paid':
states = ('posted', 'draft')
payment_states = ('not_paid', 'paid')
company_id = self.env.company.id
            # Cost price is calculated afterwards as it is a property
self.env['account.move.line'].flush(['price_unit', 'quantity', 'balance', 'product_id', 'display_type'])
self.env['account.move'].flush(['state', 'payment_state', 'move_type', 'invoice_date', 'company_id'])
self.env['product.template'].flush(['list_price'])
sqlstr = """
WITH currency_rate AS ({})
SELECT
SUM(l.price_unit / (CASE COALESCE(cr.rate, 0) WHEN 0 THEN 1.0 ELSE cr.rate END) * l.quantity) / NULLIF(SUM(l.quantity),0) AS avg_unit_price,
SUM(l.quantity * (CASE WHEN i.move_type IN ('out_invoice', 'in_invoice') THEN 1 ELSE -1 END)) AS num_qty,
SUM(ABS(l.balance) * (CASE WHEN i.move_type IN ('out_invoice', 'in_invoice') THEN 1 ELSE -1 END)) AS total,
SUM(l.quantity * pt.list_price * (CASE WHEN i.move_type IN ('out_invoice', 'in_invoice') THEN 1 ELSE -1 END)) AS sale_expected
FROM account_move_line l
LEFT JOIN account_move i ON (l.move_id = i.id)
LEFT JOIN product_product product ON (product.id=l.product_id)
LEFT JOIN product_template pt ON (pt.id = product.product_tmpl_id)
left join currency_rate cr on
(cr.currency_id = i.currency_id and
cr.company_id = i.company_id and
cr.date_start <= COALESCE(i.invoice_date, NOW()) and
(cr.date_end IS NULL OR cr.date_end > COALESCE(i.invoice_date, NOW())))
WHERE l.product_id = %s
AND i.state IN %s
AND i.payment_state IN %s
AND i.move_type IN %s
AND i.invoice_date BETWEEN %s AND %s
AND i.company_id = %s
AND l.display_type IS NULL
AND l.exclude_from_invoice_tab = false
""".format(self.env['res.currency']._select_companies_rates())
invoice_types = ('out_invoice', 'out_refund')
self.env.cr.execute(sqlstr, (val.id, states, payment_states, invoice_types, date_from, date_to, company_id))
result = self.env.cr.fetchall()[0]
res[val.id]['sale_avg_price'] = result[0] and result[0] or 0.0
res[val.id]['sale_num_invoiced'] = result[1] and result[1] or 0.0
res[val.id]['turnover'] = result[2] and result[2] or 0.0
res[val.id]['sale_expected'] = result[3] and result[3] or 0.0
res[val.id]['sales_gap'] = res[val.id]['sale_expected'] - res[val.id]['turnover']
invoice_types = ('in_invoice', 'in_refund')
self.env.cr.execute(sqlstr, (val.id, states, payment_states, invoice_types, date_from, date_to, company_id))
result = self.env.cr.fetchall()[0]
res[val.id]['purchase_avg_price'] = result[0] and result[0] or 0.0
res[val.id]['purchase_num_invoiced'] = result[1] and result[1] or 0.0
res[val.id]['total_cost'] = result[2] and result[2] or 0.0
res[val.id]['normal_cost'] = val.standard_price * res[val.id]['purchase_num_invoiced']
res[val.id]['purchase_gap'] = res[val.id]['normal_cost'] - res[val.id]['total_cost']
res[val.id]['total_margin'] = res[val.id]['turnover'] - res[val.id]['total_cost']
res[val.id]['expected_margin'] = res[val.id]['sale_expected'] - res[val.id]['normal_cost']
res[val.id]['total_margin_rate'] = res[val.id]['turnover'] and res[val.id]['total_margin'] * 100 / res[val.id]['turnover'] or 0.0
res[val.id]['expected_margin_rate'] = res[val.id]['sale_expected'] and res[val.id]['expected_margin'] * 100 / res[val.id]['sale_expected'] or 0.0
for k, v in res[val.id].items():
setattr(val, k, v)
return res
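# Illustrative relationships between the computed fields (added for clarity,
# not part of the original module), assuming one product with turnover 120.0,
# total cost 80.0, expected sale 150.0 and normal cost 90.0:
#   total_margin         = 120.0 - 80.0 = 40.0
#   expected_margin      = 150.0 - 90.0 = 60.0
#   total_margin_rate    = 40.0 * 100 / 120.0 = 33.33 (approx.)
#   expected_margin_rate = 60.0 * 100 / 150.0 = 40.0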
| ygol/odoo | addons/product_margin/models/product_product.py | Python | agpl-3.0 | 9,711 |
# clean sequences after alignment, criteria based on sequences
# make in line with canonical ordering (no extra gaps)
import os, datetime, time, re
from itertools import izip
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from scipy import stats
import numpy as np
class virus_clean(object):
"""docstring for virus_clean"""
def __init__(self,n_iqd = 5, **kwargs):
'''
parameters
		n_iqd -- number of interquartile distances accepted in the molecular clock filter
'''
self.n_iqd = n_iqd
def remove_insertions(self):
'''
remove all columns from the alignment in which the outgroup is gapped
'''
outgroup_ok = np.array(self.sequence_lookup[self.outgroup['strain']])!='-'
for seq in self.viruses:
seq.seq = Seq("".join(np.array(seq.seq)[outgroup_ok]).upper())
def clean_gaps(self):
'''
remove viruses with gaps -- not part of the standard pipeline
'''
		self.viruses = filter(lambda x: '-' not in x.seq, self.viruses)
def clean_ambiguous(self):
'''
substitute all ambiguous characters with '-',
ancestral inference will interpret this as missing data
'''
for v in self.viruses:
v.seq = Seq(re.sub(r'[BDEFHIJKLMNOPQRSUVWXYZ]', '-',str(v.seq)))
def unique_date(self):
'''
		add a unique numerical date to each leaf. uniqueness is achieved by adding a small offset
'''
from date_util import numerical_date
og = self.sequence_lookup[self.outgroup['strain']]
if hasattr(og, 'date'):
try:
og.num_date = numerical_date(og.date)
except:
print "cannot parse date"
og.num_date="undefined";
for ii, v in enumerate(self.viruses):
if hasattr(v, 'date'):
try:
v.num_date = numerical_date(v.date, self.date_format['fields']) + 1e-7*(ii+1)
except:
print "cannot parse date"
v.num_date="undefined";
def times_from_outgroup(self):
outgroup_date = self.sequence_lookup[self.outgroup['strain']].num_date
return np.array([x.num_date-outgroup_date for x in self.viruses if x.strain])
def distance_from_outgroup(self):
from seq_util import hamming_distance
outgroup_seq = self.sequence_lookup[self.outgroup['strain']].seq
return np.array([hamming_distance(x.seq, outgroup_seq) for x in self.viruses if x.strain])
def clean_distances(self):
"""Remove viruses that don't follow a loose clock """
times = self.times_from_outgroup()
distances = self.distance_from_outgroup()
slope, intercept, r_value, p_value, std_err = stats.linregress(times, distances)
residuals = slope*times + intercept - distances
r_iqd = stats.scoreatpercentile(residuals,75) - stats.scoreatpercentile(residuals,25)
if self.verbose:
print "\tslope: " + str(slope)
print "\tr: " + str(r_value)
print "\tresiduals iqd: " + str(r_iqd)
new_viruses = []
for (v,r) in izip(self.viruses,residuals):
			# filter out viruses more than n_iqd interquartile distances above or below the regression line
if np.abs(r)<self.n_iqd * r_iqd or v.id == self.outgroup["strain"]:
new_viruses.append(v)
else:
if self.verbose>1:
print "\t\tresidual:", r, "\nremoved ",v.strain
self.viruses = MultipleSeqAlignment(new_viruses)
def clean_generic(self):
print "Number of viruses before cleaning:",len(self.viruses)
self.unique_date()
self.remove_insertions()
self.clean_ambiguous()
self.clean_distances()
self.viruses.sort(key=lambda x:x.num_date)
print "Number of viruses after outlier filtering:",len(self.viruses)
| doerlbh/Indie-nextflu | augur/src/virus_clean.py | Python | agpl-3.0 | 3,403 |
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('contentstore.rest_api.v1.serializers', 'cms.djangoapps.contentstore.rest_api.v1.serializers')
from cms.djangoapps.contentstore.rest_api.v1.serializers import *
| eduNEXT/edunext-platform | import_shims/studio/contentstore/rest_api/v1/serializers.py | Python | agpl-3.0 | 431 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class XorgServer(AutotoolsPackage, XorgPackage):
"""X.Org Server is the free and open source implementation of the display
server for the X Window System stewarded by the X.Org Foundation."""
homepage = "http://cgit.freedesktop.org/xorg/xserver"
xorg_mirror_path = "xserver/xorg-server-1.18.99.901.tar.gz"
version('1.18.99.901', sha256='c8425163b588de2ee7e5c8e65b0749f2710f55a7e02a8d1dc83b3630868ceb21')
depends_on('[email protected]:')
depends_on('font-util')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('libx11')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('videoproto')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('[email protected]:')
depends_on('xineramaproto')
depends_on('libxkbfile')
depends_on('libxfont2')
depends_on('libxext')
depends_on('libxdamage')
depends_on('libxfixes')
depends_on('libepoxy')
| iulian787/spack | var/spack/repos/builtin/packages/xorg-server/package.py | Python | lgpl-2.1 | 2,055 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import spack.cmd.location
import spack.modules
description = "cd to spack directories in the shell"
section = "environment"
level = "long"
def setup_parser(subparser):
"""This is for decoration -- spack cd is used through spack's
shell support. This allows spack cd to print a descriptive
help message when called with -h."""
spack.cmd.location.setup_parser(subparser)
def cd(parser, args):
spack.modules.print_help()
| TheTimmy/spack | lib/spack/spack/cmd/cd.py | Python | lgpl-2.1 | 1,684 |
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
# max code word in this release
MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
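# For example (illustrative, not part of the original module):
#   makedict(["failure", "success"]) == {"failure": 0, "success": 1}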
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = d.items()
items.sort(key=lambda a: a[1])
for k, v in items:
f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
f = open("sre_constants.h", "w")
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
f.close()
print("done")
| harmy/kbengine | kbe/res/scripts/common/Lib/sre_constants.py | Python | lgpl-3.0 | 7,444 |
"""
Multiple dictation constructs
===============================================================================
This file is a showcase investigating the use and functionality of multiple
dictation elements within Dragonfly speech recognition grammars.
The first part of this file (i.e. the module's doc string) contains a
description of the functionality being investigated along with test code
and actual output in doctest format. This allows the reader to see what
really would happen, without needing to load the file into a speech
recognition engine and put effort into speaking all the showcased
commands.
The test code below makes use of Dragonfly's built-in element testing tool.
When run, it will connect to the speech recognition engine, load the element
being tested, mimic recognitions, and process the recognized value.
Multiple consecutive dictation elements
-------------------------------------------------------------------------------
>>> tester = ElementTester(RuleRef(ConsecutiveDictationRule()))
>>> print(tester.recognize("consecutive Alice Bob Charlie"))
Recognition: "consecutive Alice Bob Charlie"
Word and rule pairs: ("1000000" is "dgndictation")
- consecutive (1)
- Alice (1000000)
- Bob (1000000)
- Charlie (1000000)
Extras:
- dictation1: Alice
- dictation2: Bob
- dictation3: Charlie
>>> print(tester.recognize("consecutive Alice Bob"))
RecognitionFailure
Mixed literal and dictation elements
-------------------------------------------------------------------------------
Here we will investigate mixed, i.e. interspersed, fixed literal command
words and dynamic dictation elements. We will use the "MixedDictationRule"
class which has a spec of
"mixed [<dictation1>] <dictation2> command <dictation3>".
Note that "<dictation1>" was made optional instead of "<dictation2>"
because otherwise the first dictation element would always gobble up
all dictated words. There would (by definition) be no way to distinguish
which words correspond with which dictation elements. Such consecutive
dictation elements should for that reason be avoided in real command
grammars. The way the spec is defined now, adds some interesting
dynamics, because of the order in which they dictation elements parse
the recognized words. However, do note that that order is well defined
but arbitrarily chosen.
>>> tester = ElementTester(RuleRef(MixedDictationRule()))
>>> print(tester.recognize("mixed Alice Bob command Charlie"))
Recognition: "mixed Alice Bob command Charlie"
Word and rule pairs: ("1000000" is "dgndictation")
- mixed (1)
- Alice (1000000)
- Bob (1000000)
- command (1)
- Charlie (1000000)
Extras:
- dictation1: Alice
- dictation2: Bob
- dictation3: Charlie
>>> print(tester.recognize("mixed Alice command Charlie"))
Recognition: "mixed Alice command Charlie"
Word and rule pairs: ("1000000" is "dgndictation")
- mixed (1)
- Alice (1000000)
- command (1)
- Charlie (1000000)
Extras:
- dictation2: Alice
- dictation3: Charlie
>>> print(tester.recognize("mixed Alice Bob command"))
RecognitionFailure
>>> print(tester.recognize("mixed command Charlie"))
RecognitionFailure
Repetition of dictation elements
-------------------------------------------------------------------------------
Now let's take a look at repetition of dictation elements. For this
we will use the "RepeatedDictationRule" class, which defines its spec
as a repetition of "command <dictation>". I.e. "command Alice" will
match, and "command Alice command Bob" will also match.
Note that this rule is inherently ambiguous, given the lack of a
clear definition of grouping or precedence rules for fixed literal
words in commands, and dynamic dictation elements. As an example,
"command Alice command Bob" could either match 2 repetitions with
"Alice" and "Bob" as dictation values, or a single repetition with
"Alice command Bob" as its only dictation value. The tests below
show which of these actually occurs.
>>> tester = ElementTester(RuleRef(RepeatedDictationRule()))
>>> print(tester.recognize("command Alice"))
Recognition: "command Alice"
Word and rule pairs: ("1000000" is "dgndictation")
- command (1)
- Alice (1000000)
Extras:
- repetition: [[u'command', NatlinkDictationContainer(Alice)]]
>>> print(tester.recognize("command Alice command Bob"))
Recognition: "command Alice command Bob"
Word and rule pairs: ("1000000" is "dgndictation")
- command (1)
- Alice (1000000)
- command (1000000)
- Bob (1000000)
Extras:
- repetition: [[u'command', NatlinkDictationContainer(Alice, command, Bob)]]
"""
#---------------------------------------------------------------------------
import doctest
from dragonfly import *
from dragonfly.test.infrastructure import RecognitionFailure
from dragonfly.test.element_testcase import ElementTestCase
from dragonfly.test.element_tester import ElementTester
#---------------------------------------------------------------------------
class RecognitionAnalysisRule(CompoundRule):
"""
Base class that implements reporting in human-readable format
details about the recognized phrase. It is used by the actual
testing rules below, and allows the doctests above to be easily
readable and informative.
"""
def _process_recognition(self, node, extras):
        Paste(self.get_recognition_info(node)).execute()
def value(self, node):
return self.get_recognition_info(node)
def get_recognition_info(self, node):
output = []
output.append('Recognition: "{0}"'.format(" ".join(node.words())))
output.append('Word and rule pairs: ("1000000" is "dgndictation")')
for word, rule in node.full_results():
output.append(" - {0} ({1})".format(word, rule))
output.append("Extras:")
for key in sorted(extra.name for extra in self.extras):
extra_node = node.get_child_by_name(key)
if extra_node:
output.append(" - {0}: {1}".format(key, extra_node.value()))
return "\n".join(output)
#---------------------------------------------------------------------------
class ConsecutiveDictationRule(RecognitionAnalysisRule):
spec = "consecutive <dictation1> <dictation2> <dictation3>"
extras = [Dictation("dictation1"),
Dictation("dictation2"),
Dictation("dictation3")]
#---------------------------------------------------------------------------
class MixedDictationRule(RecognitionAnalysisRule):
spec = "mixed [<dictation1>] <dictation2> command <dictation3>"
extras = [Dictation("dictation1"),
Dictation("dictation2"),
Dictation("dictation3")]
#---------------------------------------------------------------------------
class RepeatedDictationRule(RecognitionAnalysisRule):
spec = "<repetition>"
extras = [Repetition(name="repetition",
child=Sequence([Literal("command"),
Dictation()]))]
#---------------------------------------------------------------------------
def main():
engine = get_engine()
engine.connect()
try:
doctest.testmod(verbose=True)
finally:
engine.disconnect()
if __name__ == "__main__":
main()
| Versatilus/dragonfly | dragonfly/examples/test_multiple_dictation.py | Python | lgpl-3.0 | 7,289 |
import os
import unittest
from mock import patch, Mock
from tests.utils import (
FakedCache,
ObjectWithSignals,
setup_test_env,
)
setup_test_env()
from softwarecenter.db.database import StoreDatabase
from softwarecenter.ui.gtk3.views import lobbyview
from softwarecenter.ui.gtk3.widgets.exhibits import (
_HtmlRenderer,
)
class ExhibitsTestCase(unittest.TestCase):
"""The test suite for the exhibits carousel."""
def setUp(self):
self.cache = FakedCache()
self.db = StoreDatabase(cache=self.cache)
self.lobby = lobbyview.LobbyView(cache=self.cache, db=self.db,
icons=None, apps_filter=None)
self.addCleanup(self.lobby.destroy)
def _get_banner_from_lobby(self):
return self.lobby.vbox.get_children()[-1].get_child()
def test_featured_exhibit_by_default(self):
"""Show the featured exhibit before querying the remote service."""
self.lobby._append_banner_ads()
banner = self._get_banner_from_lobby()
self.assertEqual(1, len(banner.exhibits))
self.assertIsInstance(banner.exhibits[0], lobbyview.FeaturedExhibit)
def test_no_exhibit_if_not_available(self):
"""The exhibit should not be shown if the package is not available."""
exhibit = Mock()
exhibit.package_names = u'foobarbaz'
sca = ObjectWithSignals()
sca.query_exhibits = lambda: sca.emit('exhibits', sca, [exhibit])
with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
self.lobby._append_banner_ads()
banner = self._get_banner_from_lobby()
self.assertEqual(1, len(banner.exhibits))
self.assertIsInstance(banner.exhibits[0], lobbyview.FeaturedExhibit)
def test_exhibit_if_available(self):
"""The exhibit should be shown if the package is available."""
exhibit = Mock()
exhibit.package_names = u'foobarbaz'
exhibit.banner_urls = ['banner']
exhibit.title_translated = ''
self.cache[u'foobarbaz'] = Mock()
sca = ObjectWithSignals()
sca.query_exhibits = lambda: sca.emit('exhibits', sca, [exhibit])
with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
self.lobby._append_banner_ads()
banner = self._get_banner_from_lobby()
self.assertEqual(1, len(banner.exhibits))
self.assertIs(banner.exhibits[0], exhibit)
def test_exhibit_if_mixed_availability(self):
"""The exhibit should be shown even if some are not available."""
# available exhibit
exhibit = Mock()
exhibit.package_names = u'foobarbaz'
exhibit.banner_urls = ['banner']
exhibit.title_translated = ''
self.cache[u'foobarbaz'] = Mock()
# not available exhibit
other = Mock()
other.package_names = u'not-there'
sca = ObjectWithSignals()
sca.query_exhibits = lambda: sca.emit('exhibits', sca,
[exhibit, other])
with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
self.lobby._append_banner_ads()
banner = self._get_banner_from_lobby()
self.assertEqual(1, len(banner.exhibits))
self.assertIs(banner.exhibits[0], exhibit)
def test_exhibit_with_url(self):
# available exhibit
exhibit = Mock()
exhibit.package_names = ''
exhibit.click_url = 'http://example.com'
exhibit.banner_urls = ['banner']
exhibit.title_translated = ''
sca = ObjectWithSignals()
sca.query_exhibits = lambda: sca.emit('exhibits', sca,
[exhibit])
with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
# add the banners
self.lobby._append_banner_ads()
# fake click
alloc = self.lobby.exhibit_banner.get_allocation()
mock_event = Mock()
mock_event.x = alloc.x
mock_event.y = alloc.y
with patch.object(self.lobby.exhibit_banner, 'emit') as mock_emit:
self.lobby.exhibit_banner.on_button_press(None, mock_event)
self.lobby.exhibit_banner.on_button_release(None, mock_event)
mock_emit.assert_called()
signal_name = mock_emit.call_args[0][0]
call_exhibit = mock_emit.call_args[0][1]
self.assertEqual(signal_name, "show-exhibits-clicked")
self.assertEqual(call_exhibit.click_url, "http://example.com")
def test_exhibit_with_featured_exhibit(self):
""" regression test for bug #1023777 """
sca = ObjectWithSignals()
sca.query_exhibits = lambda: sca.emit('exhibits', sca,
[lobbyview.FeaturedExhibit()])
with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
# add the banners
self.lobby._append_banner_ads()
# fake click
alloc = self.lobby.exhibit_banner.get_allocation()
mock_event = Mock()
mock_event.x = alloc.x
mock_event.y = alloc.y
with patch.object(self.lobby, 'emit') as mock_emit:
self.lobby.exhibit_banner.on_button_press(None, mock_event)
self.lobby.exhibit_banner.on_button_release(None, mock_event)
mock_emit.assert_called()
signal_name = mock_emit.call_args[0][0]
call_category = mock_emit.call_args[0][1]
self.assertEqual(signal_name, "category-selected")
self.assertEqual(call_category.name, "Our star apps")
class HtmlRendererTestCase(unittest.TestCase):
def test_multiple_images(self):
downloader = ObjectWithSignals()
downloader.download_file = lambda *args, **kwargs: downloader.emit(
"file-download-complete", downloader, os.path.basename(args[0]))
with patch("softwarecenter.ui.gtk3.widgets.exhibits."
"SimpleFileDownloader", lambda: downloader):
renderer = _HtmlRenderer()
mock_exhibit = Mock()
mock_exhibit.banner_urls = [
"http://example.com/path1/banner1.png",
"http://example.com/path2/banner2.png",
]
mock_exhibit.html = "url('/path1/banner1.png')#"\
"url('/path2/banner2.png')"
renderer.set_exhibit(mock_exhibit)
# assert the stuff we expected to get downloaded got downloaded
self.assertEqual(
renderer._downloaded_banner_images,
["banner1.png", "banner2.png"])
# test that the path mangling worked
self.assertEqual(
mock_exhibit.html, "url('banner1.png')#url('banner2.png')")
if __name__ == "__main__":
unittest.main()
| sti-lyneos/shop | tests/gtk3/test_exhibits.py | Python | lgpl-3.0 | 6,973 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Result'
db.create_table('taxonomy_result', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('taxonomy', ['Result'])
# Adding model 'Tag'
db.create_table('taxonomy_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
))
db.send_create_signal('taxonomy', ['Tag'])
# Adding model 'Category'
db.create_table('taxonomy_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['taxonomy.Category'])),
('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
))
db.send_create_signal('taxonomy', ['Category'])
# Adding model 'Vote'
db.create_table('taxonomy_vote', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(related_name='poll_votes', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('taxonomy', ['Vote'])
# Adding unique constraint on 'Vote', fields ['owner', 'content_type', 'object_id']
db.create_unique('taxonomy_vote', ['owner_id', 'content_type_id', 'object_id'])
def backwards(self, orm):
# Removing unique constraint on 'Vote', fields ['owner', 'content_type', 'object_id']
db.delete_unique('taxonomy_vote', ['owner_id', 'content_type_id', 'object_id'])
# Deleting model 'Result'
db.delete_table('taxonomy_result')
# Deleting model 'Tag'
db.delete_table('taxonomy_tag')
# Deleting model 'Category'
db.delete_table('taxonomy_category')
# Deleting model 'Vote'
db.delete_table('taxonomy_vote')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'taxonomy.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['taxonomy.Category']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'taxonomy.result': {
'Meta': {'object_name': 'Result'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'taxonomy.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'taxonomy.vote': {
'Meta': {'unique_together': "(('owner', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['taxonomy']
| zuck/prometeo-erp | core/taxonomy/migrations/0001_initial.py | Python | lgpl-3.0 | 8,429 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Memorystore operators"""
import os
from urllib.parse import urlparse
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_MEMORYSTORE
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_ARCHIVE_URL = os.environ.get("GCP_MEMORYSTORE_EXPORT_GCS_URL", "gs://test-memorystore/my-export.rdb")
GCP_ARCHIVE_URL_PARTS = urlparse(GCP_ARCHIVE_URL)
GCP_BUCKET_NAME = GCP_ARCHIVE_URL_PARTS.netloc
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_MEMORYSTORE)
class CloudMemorystoreSystemTest(GoogleSystemTest):
"""
System tests for Google Cloud Memorystore operators
    It uses a real service.
"""
@provide_gcp_context(GCP_MEMORYSTORE)
def setUp(self):
super().setUp()
self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
@provide_gcp_context(GCP_MEMORYSTORE)
def test_run_example_dag_memorystore_redis(self):
self.run_dag('gcp_cloud_memorystore_redis', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_MEMORYSTORE)
def test_run_example_dag_memorystore_memcached(self):
self.run_dag('gcp_cloud_memorystore_memcached', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_MEMORYSTORE)
def tearDown(self):
self.delete_gcs_bucket(GCP_BUCKET_NAME)
super().tearDown()
| airbnb/airflow | tests/providers/google/cloud/operators/test_cloud_memorystore_system.py | Python | apache-2.0 | 2,251 |
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import AbstractNode, NodeLog
from osf.utils import permissions
from osf.utils.sanitize import strip_html
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
OSFGroupFactory,
RegistrationFactory,
AuthUserFactory,
PrivateLinkFactory,
)
from tests.base import fake
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeChildrenList:
@pytest.fixture()
def private_project(self, user):
private_project = ProjectFactory()
private_project.add_contributor(
user,
permissions=permissions.WRITE
)
private_project.save()
return private_project
@pytest.fixture()
def component(self, user, private_project):
return NodeFactory(parent=private_project, creator=user)
@pytest.fixture()
def pointer(self):
return ProjectFactory()
@pytest.fixture()
def private_project_url(self, private_project):
return '/{}nodes/{}/children/'.format(API_BASE, private_project._id)
@pytest.fixture()
def public_project(self, user):
return ProjectFactory(is_public=True, creator=user)
@pytest.fixture()
def public_component(self, user, public_project):
return NodeFactory(parent=public_project, creator=user, is_public=True)
@pytest.fixture()
def public_project_url(self, user, public_project):
return '/{}nodes/{}/children/'.format(API_BASE, public_project._id)
@pytest.fixture()
def view_only_link(self, private_project):
view_only_link = PrivateLinkFactory(name='node_view_only_link')
view_only_link.nodes.add(private_project)
view_only_link.save()
return view_only_link
def test_return_public_node_children_list(
self, app, public_component,
public_project_url):
# test_return_public_node_children_list_logged_out
res = app.get(public_project_url)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == public_component._id
# test_return_public_node_children_list_logged_in
non_contrib = AuthUserFactory()
res = app.get(public_project_url, auth=non_contrib.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == public_component._id
def test_return_private_node_children_list(
self, app, user, component, private_project, private_project_url):
# test_return_private_node_children_list_logged_out
res = app.get(private_project_url, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_return_private_node_children_list_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.get(
private_project_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_return_private_node_children_list_logged_in_contributor
res = app.get(private_project_url, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == component._id
# test_return_private_node_children_osf_group_member_admin
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
private_project.add_osf_group(group, permissions.ADMIN)
res = app.get(private_project_url, auth=group_mem.auth)
assert res.status_code == 200
        # Can view node children for which you have implicit admin permissions
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == component._id
def test_node_children_list_does_not_include_pointers(
self, app, user, component, private_project_url):
res = app.get(private_project_url, auth=user.auth)
assert len(res.json['data']) == 1
def test_node_children_list_does_not_include_unauthorized_projects(
self, app, user, component, private_project, private_project_url):
NodeFactory(parent=private_project)
res = app.get(private_project_url, auth=user.auth)
assert len(res.json['data']) == 1
def test_node_children_list_does_not_include_deleted(
self, app, user, public_project, public_component,
component, public_project_url):
child_project = NodeFactory(parent=public_project, creator=user)
child_project.save()
res = app.get(public_project_url, auth=user.auth)
assert res.status_code == 200
ids = [node['id'] for node in res.json['data']]
assert child_project._id in ids
assert 2 == len(ids)
child_project.is_deleted = True
child_project.save()
res = app.get(public_project_url, auth=user.auth)
assert res.status_code == 200
ids = [node['id'] for node in res.json['data']]
assert child_project._id not in ids
assert 1 == len(ids)
def test_node_children_list_does_not_include_node_links(
self, app, user, public_project, public_component,
public_project_url):
pointed_to = ProjectFactory(is_public=True)
public_project.add_pointer(
pointed_to,
auth=Auth(public_project.creator)
)
res = app.get(public_project_url, auth=user.auth)
ids = [node['id'] for node in res.json['data']]
assert public_component._id in ids # sanity check
assert pointed_to._id not in ids
# Regression test for https://openscience.atlassian.net/browse/EMB-593
# Duplicates returned in child count
def test_node_children_related_counts_duplicate_query_results(self, app, user, public_project,
private_project, public_project_url):
user_2 = AuthUserFactory()
# Adding a child component
child = NodeFactory(parent=public_project, creator=user, is_public=True, category='software')
child.add_contributor(user_2, permissions.WRITE, save=True)
# Adding a grandchild
NodeFactory(parent=child, creator=user, is_public=True)
# Adding a node link
public_project.add_pointer(
private_project,
auth=Auth(public_project.creator)
)
# Assert NodeChildrenList returns one result
res = app.get(public_project_url, auth=user.auth)
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == child._id
project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
res = app.get(project_url, auth=user.auth)
assert res.status_code == 200
# Verifying related_counts match direct children count (grandchildren not included, pointers not included)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
def test_node_children_related_counts(self, app, user, public_project):
parent = ProjectFactory(creator=user, is_public=False)
user_2 = AuthUserFactory()
parent.add_contributor(user_2, permissions.ADMIN)
child = NodeFactory(parent=parent, creator=user_2, is_public=False, category='software')
NodeFactory(parent=child, creator=user_2, is_public=False)
        # child has one component. `user` can view due to implicit admin perms
        component_url = '/{}nodes/{}/children/'.format(API_BASE, child._id)
res = app.get(component_url, auth=user.auth)
assert len(res.json['data']) == 1
project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, child._id)
res = app.get(project_url, auth=user.auth)
assert res.status_code == 200
# Nodes with implicit admin perms are also included in the count
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
def test_child_counts_permissions(self, app, user, public_project):
NodeFactory(parent=public_project, creator=user)
url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
user_two = AuthUserFactory()
# Unauthorized
res = app.get(url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# Logged in noncontrib
res = app.get(url, auth=user_two.auth)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# Logged in contrib
res = app.get(url, auth=user.auth)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
def test_private_node_children_with_view_only_link(self, user, app, private_project,
component, view_only_link, private_project_url):
# get node related_counts with vol before vol is attached to components
node_url = '/{}nodes/{}/?related_counts=children&view_only={}'.format(API_BASE,
private_project._id, view_only_link.key)
res = app.get(node_url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# view only link is not attached to components
view_only_link_url = '{}?view_only={}'.format(private_project_url, view_only_link.key)
res = app.get(view_only_link_url)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert len(ids) == 0
assert component._id not in ids
# view only link is attached to components
view_only_link.nodes.add(component)
res = app.get(view_only_link_url)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert component._id in ids
assert 'contributors' in res.json['data'][0]['relationships']
assert 'implicit_contributors' in res.json['data'][0]['relationships']
assert 'bibliographic_contributors' in res.json['data'][0]['relationships']
# get node related_counts with vol once vol is attached to components
res = app.get(node_url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
# make private vol anonymous
view_only_link.anonymous = True
view_only_link.save()
res = app.get(view_only_link_url)
assert 'contributors' not in res.json['data'][0]['relationships']
assert 'implicit_contributors' not in res.json['data'][0]['relationships']
assert 'bibliographic_contributors' not in res.json['data'][0]['relationships']
# delete vol
view_only_link.is_deleted = True
view_only_link.save()
res = app.get(view_only_link_url, expect_errors=True)
assert res.status_code == 401
@pytest.mark.django_db
class TestNodeChildrenListFiltering:
def test_node_child_filtering(self, app, user):
project = ProjectFactory(creator=user)
title_one, title_two = fake.bs(), fake.bs()
component = NodeFactory(title=title_one, parent=project)
component_two = NodeFactory(title=title_two, parent=project)
url = '/{}nodes/{}/children/?filter[title]={}'.format(
API_BASE,
project._id,
title_one
)
res = app.get(url, auth=user.auth)
ids = [node['id'] for node in res.json['data']]
assert component._id in ids
assert component_two._id not in ids
@pytest.mark.django_db
class TestNodeChildCreate:
@pytest.fixture()
def project(self, user):
return ProjectFactory(creator=user, is_public=True)
@pytest.fixture()
def url(self, project):
return '/{}nodes/{}/children/'.format(API_BASE, project._id)
@pytest.fixture()
def child(self):
return {
'data': {
'type': 'nodes',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project'
}
}
}
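    # The fixture above is a minimal JSON API payload ('type' plus nested
    # 'attributes'); the tests below send malformed variants of it to
    # exercise the validation errors.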
def test_creates_child(self, app, user, project, child, url):
# test_creates_child_logged_out_user
res = app.post_json_api(url, child, expect_errors=True)
assert res.status_code == 401
project.reload()
assert len(project.nodes) == 0
# test_creates_child_logged_in_read_contributor
read_contrib = AuthUserFactory()
project.add_contributor(
read_contrib,
permissions=permissions.READ,
auth=Auth(user), save=True
)
res = app.post_json_api(
url, child, auth=read_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
# test_creates_child_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.post_json_api(
url, child, auth=non_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
# test_creates_child_group_member_read
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
project.add_osf_group(group, permissions.READ)
res = app.post_json_api(
url, child, auth=group_mem.auth,
expect_errors=True
)
assert res.status_code == 403
project.update_osf_group(group, permissions.WRITE)
res = app.post_json_api(
url, child, auth=group_mem.auth,
expect_errors=True
)
assert res.status_code == 201
# test_creates_child_no_type
child = {
'data': {
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}
}
res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# test_creates_child_incorrect_type
child = {
'data': {
'type': 'Wrong type.',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}
}
res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
# test_creates_child_properties_not_nested
child = {
'data': {
'attributes': {
'title': 'child',
'description': 'this is a child project'
},
'category': 'project'
}
}
res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field is required.'
assert res.json['errors'][0]['source']['pointer'] == '/data/attributes/category'
def test_creates_child_logged_in_write_contributor(
self, app, user, project, child, url):
write_contrib = AuthUserFactory()
project.add_contributor(
write_contrib,
permissions=permissions.WRITE,
auth=Auth(user),
save=True)
res = app.post_json_api(url, child, auth=write_contrib.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']
project.reload()
child_id = res.json['data']['id']
assert child_id == project.nodes[0]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
def test_creates_child_logged_in_owner(
self, app, user, project, child, url):
res = app.post_json_api(url, child, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']
project.reload()
assert res.json['data']['id'] == project.nodes[0]._id
assert project.nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
def test_creates_child_creates_child_and_sanitizes_html_logged_in_owner(
self, app, user, project, url):
title = '<em>Reasonable</em> <strong>Project</strong>'
description = 'An <script>alert("even reasonabler")</script> child'
res = app.post_json_api(url, {
'data': {
'type': 'nodes',
'attributes': {
'title': title,
'description': description,
'category': 'project',
'public': True
}
}
}, auth=user.auth)
child_id = res.json['data']['id']
assert res.status_code == 201
url = '/{}nodes/{}/'.format(API_BASE, child_id)
res = app.get(url, auth=user.auth)
assert res.json['data']['attributes']['title'] == strip_html(title)
assert res.json['data']['attributes']['description'] == strip_html(
description)
assert res.json['data']['attributes']['category'] == 'project'
project.reload()
child_id = res.json['data']['id']
assert child_id == project.nodes[0]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
def test_cannot_create_child_on_a_registration(self, app, user, project):
registration = RegistrationFactory(project=project, creator=user)
url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
res = app.post_json_api(url, {
'data': {
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'project',
'public': True,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 404
@pytest.mark.django_db
class TestNodeChildrenBulkCreate:
@pytest.fixture()
def project(self, user):
return ProjectFactory(creator=user, is_public=True)
@pytest.fixture()
def url(self, project):
return '/{}nodes/{}/children/'.format(API_BASE, project._id)
@pytest.fixture()
def child_one(self):
return {
'type': 'nodes',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project'
}
}
@pytest.fixture()
def child_two(self):
return {
'type': 'nodes',
'attributes': {
'title': 'second child',
'description': 'this is my hypothesis',
'category': 'hypothesis'
}
}
def test_bulk_children_create_blank_request(self, app, user, url):
res = app.post_json_api(
url, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
def test_bulk_creates_children_limits(self, app, user, child_one, url):
res = app.post_json_api(
url, {'data': [child_one] * 101},
auth=user.auth, expect_errors=True, bulk=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
assert res.json['errors'][0]['source']['pointer'] == '/data'
def test_bulk_creates_children_auth_errors(
self, app, user, project, child_one, child_two, url):
# test_bulk_creates_children_logged_out_user
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
expect_errors=True, bulk=True
)
assert res.status_code == 401
project.reload()
assert len(project.nodes) == 0
# test_bulk_creates_children_logged_in_read_contributor
read_contrib = AuthUserFactory()
project.add_contributor(
read_contrib,
permissions=permissions.READ,
auth=Auth(user),
save=True)
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=read_contrib.auth,
expect_errors=True, bulk=True)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
# test_bulk_creates_children_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=non_contrib.auth,
expect_errors=True, bulk=True)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
def test_bulk_creates_children_logged_in_owner(
self, app, user, project, child_one, child_two, url):
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=user.auth, bulk=True)
assert res.status_code == 201
assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']
project.reload()
nodes = project.nodes
assert res.json['data'][0]['id'] == nodes[0]._id
assert res.json['data'][1]['id'] == nodes[1]._id
assert nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED
def test_bulk_creates_children_child_logged_in_write_contributor(
self, app, user, project, child_one, child_two, url):
write_contrib = AuthUserFactory()
project.add_contributor(
write_contrib,
permissions=permissions.WRITE,
auth=Auth(user),
save=True)
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=write_contrib.auth, bulk=True)
assert res.status_code == 201
assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']
project.reload()
child_id = res.json['data'][0]['id']
child_two_id = res.json['data'][1]['id']
nodes = project.nodes
assert child_id == nodes[0]._id
assert child_two_id == nodes[1]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED
def test_bulk_creates_children_and_sanitizes_html_logged_in_owner(
self, app, user, project, url):
title = '<em>Reasoning</em> <strong>Aboot Projects</strong>'
description = 'A <script>alert("super reasonable")</script> child'
res = app.post_json_api(url, {
'data': [{
'type': 'nodes',
'attributes': {
'title': title,
'description': description,
'category': 'project',
'public': True
}
}]
}, auth=user.auth, bulk=True)
child_id = res.json['data'][0]['id']
assert res.status_code == 201
url = '/{}nodes/{}/'.format(API_BASE, child_id)
res = app.get(url, auth=user.auth)
assert res.json['data']['attributes']['title'] == strip_html(title)
assert res.json['data']['attributes']['description'] == strip_html(
description)
assert res.json['data']['attributes']['category'] == 'project'
project.reload()
child_id = res.json['data']['id']
assert child_id == project.nodes[0]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
def test_cannot_bulk_create_children_on_a_registration(
self, app, user, project, child_two):
registration = RegistrationFactory(project=project, creator=user)
url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
res = app.post_json_api(url, {
'data': [child_two, {
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'project',
'public': True,
}
}]
}, auth=user.auth, expect_errors=True, bulk=True)
assert res.status_code == 404
project.reload()
assert len(project.nodes) == 0
def test_bulk_creates_children_payload_errors(
self, app, user, project, child_two, url):
# def test_bulk_creates_children_no_type(self, app, user, project,
# child_two, url):
child = {
'data': [child_two, {
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}]
}
res = app.post_json_api(
url, child, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
project.reload()
assert len(project.nodes) == 0
# def test_bulk_creates_children_incorrect_type(self, app, user, project,
# child_two, url):
child = {
'data': [child_two, {
'type': 'Wrong type.',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}]
}
res = app.post_json_api(
url, child, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
project.reload()
assert len(project.nodes) == 0
# def test_bulk_creates_children_properties_not_nested(self, app, user,
# project, child_two, url):
child = {
'data': [child_two, {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}]
}
res = app.post_json_api(
url, child, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field is required.'
assert res.json['errors'][0]['source']['pointer'] == '/data/1/attributes/category'
project.reload()
assert len(project.nodes) == 0
| saradbowman/osf.io | api_tests/nodes/views/test_node_children_list.py | Python | apache-2.0 | 29,554 |
# Copyright 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fastfood Chef Cookbook manager."""
from __future__ import print_function
import os
from fastfood import utils
class CookBook(object):
"""Chef Cookbook object.
Understands metadata.rb, Berksfile and how to parse them.
"""
def __init__(self, path):
"""Initialize CookBook wrapper at 'path'."""
self.path = utils.normalize_path(path)
self._metadata = None
        if not os.path.isdir(self.path):
raise ValueError("Cookbook dir %s does not exist."
% self.path)
self._berksfile = None
@property
def name(self):
"""Cookbook name property."""
try:
return self.metadata.to_dict()['name']
except KeyError:
            raise LookupError("%s is missing 'name' attribute."
% self.metadata)
@property
def metadata(self):
"""Return dict representation of this cookbook's metadata.rb ."""
self.metadata_path = os.path.join(self.path, 'metadata.rb')
if not os.path.isfile(self.metadata_path):
raise ValueError("Cookbook needs metadata.rb, %s"
% self.metadata_path)
if not self._metadata:
self._metadata = MetadataRb(open(self.metadata_path, 'r+'))
return self._metadata
@property
def berksfile(self):
"""Return this cookbook's Berksfile instance."""
self.berks_path = os.path.join(self.path, 'Berksfile')
if not self._berksfile:
if not os.path.isfile(self.berks_path):
raise ValueError("No Berksfile found at %s"
% self.berks_path)
self._berksfile = Berksfile(open(self.berks_path, 'r+'))
return self._berksfile
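# Rough usage sketch (hypothetical path): wrap an on-disk cookbook and read
# its parsed files, e.g.
#   book = CookBook('/path/to/cookbook')
#   book.metadata.to_dict()   # -> {'name': ..., 'depends': {...}}
#   book.berksfile.to_dict()  # -> {'source': [...], 'cookbook': {...}}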
class MetadataRb(utils.FileWrapper):
"""Wrapper for a metadata.rb file."""
@classmethod
def from_dict(cls, dictionary):
"""Create a MetadataRb instance from a dict."""
cookbooks = set()
# put these in order
groups = [cookbooks]
for key, val in dictionary.items():
if key == 'depends':
cookbooks.update({cls.depends_statement(cbn, meta)
for cbn, meta in val.items()})
body = ''
for group in groups:
if group:
body += '\n'
body += '\n'.join(group)
return cls.from_string(body)
@staticmethod
def depends_statement(cookbook_name, metadata=None):
"""Return a valid Ruby 'depends' statement for the metadata.rb file."""
line = "depends '%s'" % cookbook_name
if metadata:
if not isinstance(metadata, dict):
raise TypeError("Stencil dependency options for %s "
"should be a dict of options, not %s."
% (cookbook_name, metadata))
if metadata:
line = "%s '%s'" % (line, "', '".join(metadata))
return line
def to_dict(self):
"""Return a dictionary representation of this metadata.rb file."""
return self.parse()
def parse(self):
"""Parse the metadata.rb into a dict."""
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
depends = {}
for line in data:
if not len(line) == 2:
continue
key, value = line
if key == 'depends':
value = value.split(',')
lib = utils.ruby_strip(value[0])
detail = [utils.ruby_strip(j) for j in value[1:]]
depends[lib] = detail
datamap = {key: utils.ruby_strip(val) for key, val in data}
if depends:
datamap['depends'] = depends
self.seek(0)
return datamap
def merge(self, other):
"""Add requirements from 'other' metadata.rb into this one."""
if not isinstance(other, MetadataRb):
raise TypeError("MetadataRb to merge should be a 'MetadataRb' "
"instance, not %s.", type(other))
current = self.to_dict()
new = other.to_dict()
# compare and gather cookbook dependencies
meta_writelines = ['%s\n' % self.depends_statement(cbn, meta)
for cbn, meta in new.get('depends', {}).items()
if cbn not in current.get('depends', {})]
self.write_statements(meta_writelines)
return self.to_dict()
class Berksfile(utils.FileWrapper):
"""Wrapper for a Berksfile."""
berks_options = [
'branch',
'git',
'path',
'ref',
'revision',
'tag',
]
def to_dict(self):
"""Return a dictionary representation of this Berksfile."""
return self.parse()
def parse(self):
"""Parse this Berksfile into a dict."""
self.flush()
self.seek(0)
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
datamap = {}
for line in data:
if len(line) == 1:
datamap[line[0]] = True
elif len(line) == 2:
key, value = line
if key == 'cookbook':
datamap.setdefault('cookbook', {})
value = [utils.ruby_strip(v) for v in value.split(',')]
lib, detail = value[0], value[1:]
datamap['cookbook'].setdefault(lib, {})
# if there is additional dependency data but its
# not the ruby hash, its the version constraint
if detail and not any("".join(detail).startswith(o)
for o in self.berks_options):
constraint, detail = detail[0], detail[1:]
datamap['cookbook'][lib]['constraint'] = constraint
if detail:
for deet in detail:
opt, val = [
utils.ruby_strip(i)
for i in deet.split(':', 1)
]
if not any(opt == o for o in self.berks_options):
raise ValueError(
"Cookbook detail '%s' does not specify "
"one of '%s'" % (opt, self.berks_options))
else:
datamap['cookbook'][lib][opt.strip(':')] = (
utils.ruby_strip(val))
elif key == 'source':
datamap.setdefault(key, [])
datamap[key].append(utils.ruby_strip(value))
elif key:
datamap[key] = utils.ruby_strip(value)
self.seek(0)
return datamap
@classmethod
def from_dict(cls, dictionary):
"""Create a Berksfile instance from a dict."""
cookbooks = set()
sources = set()
other = set()
# put these in order
groups = [sources, cookbooks, other]
for key, val in dictionary.items():
if key == 'cookbook':
cookbooks.update({cls.cookbook_statement(cbn, meta)
for cbn, meta in val.items()})
elif key == 'source':
sources.update({"source '%s'" % src for src in val})
elif key == 'metadata':
other.add('metadata')
body = ''
for group in groups:
if group:
body += '\n'
body += '\n'.join(group)
return cls.from_string(body)
@staticmethod
def cookbook_statement(cookbook_name, metadata=None):
"""Return a valid Ruby 'cookbook' statement for the Berksfile."""
line = "cookbook '%s'" % cookbook_name
if metadata:
if not isinstance(metadata, dict):
raise TypeError("Berksfile dependency hash for %s "
"should be a dict of options, not %s."
% (cookbook_name, metadata))
# not like the others...
if 'constraint' in metadata:
line += ", '%s'" % metadata.pop('constraint')
for opt, spec in metadata.items():
line += ", %s: '%s'" % (opt, spec)
return line
def merge(self, other):
"""Add requirements from 'other' Berksfile into this one."""
if not isinstance(other, Berksfile):
raise TypeError("Berksfile to merge should be a 'Berksfile' "
"instance, not %s.", type(other))
current = self.to_dict()
new = other.to_dict()
# compare and gather cookbook dependencies
berks_writelines = ['%s\n' % self.cookbook_statement(cbn, meta)
for cbn, meta in new.get('cookbook', {}).items()
if cbn not in current.get('cookbook', {})]
# compare and gather 'source' requirements
berks_writelines.extend(["source '%s'\n" % src for src
in new.get('source', [])
if src not in current.get('source', [])])
self.write_statements(berks_writelines)
return self.to_dict()
| samstav/fastfood | fastfood/book.py | Python | apache-2.0 | 10,125 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute import flavors as flavors_api
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'flavormanage')
class FlavorManageController(wsgi.Controller):
"""
The Flavor Lifecycle API controller for the OpenStack API.
"""
_view_builder_class = flavors_view.ViewBuilder
def __init__(self):
super(FlavorManageController, self).__init__()
@wsgi.action("delete")
def _delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
flavor = flavors.get_flavor_by_flavor_id(
id, ctxt=context, read_deleted="no")
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
flavors.destroy(flavor['name'])
return webob.Response(status_int=202)
@wsgi.action("create")
@wsgi.serializers(xml=flavors_api.FlavorTemplate)
def _create(self, req, body):
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'flavor'):
msg = _("Invalid request body")
raise webob.exc.HTTPBadRequest(explanation=msg)
vals = body['flavor']
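        # The request body is expected to look roughly like:
        #   {"flavor": {"name": "m1.tiny", "id": "10", "ram": 512,
        #               "vcpus": 1, "disk": 1,
        #               "OS-FLV-EXT-DATA:ephemeral": 0, "swap": 0,
        #               "rxtx_factor": 1.0,
        #               "os-flavor-access:is_public": true}}
        # (values here are illustrative; only the keys read below matter).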
name = vals.get('name')
flavorid = vals.get('id')
memory = vals.get('ram')
vcpus = vals.get('vcpus')
root_gb = vals.get('disk')
ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
swap = vals.get('swap', 0)
rxtx_factor = vals.get('rxtx_factor', 1.0)
is_public = vals.get('os-flavor-access:is_public', True)
try:
flavor = flavors.create(name, memory, vcpus, root_gb,
ephemeral_gb=ephemeral_gb,
flavorid=flavorid, swap=swap,
rxtx_factor=rxtx_factor,
is_public=is_public)
if not flavor['is_public']:
flavors.add_flavor_access(flavor['flavorid'],
context.project_id, context)
req.cache_db_flavor(flavor)
except (exception.InstanceTypeExists,
exception.InstanceTypeIdExists) as err:
raise webob.exc.HTTPConflict(explanation=err.format_message())
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
return self._view_builder.show(req, flavor)
class Flavormanage(extensions.ExtensionDescriptor):
"""
Flavor create/delete API support
"""
name = "FlavorManage"
alias = "os-flavor-manage"
namespace = ("http://docs.openstack.org/compute/ext/"
"flavor_manage/api/v1.1")
updated = "2012-01-19T00:00:00+00:00"
def get_controller_extensions(self):
controller = FlavorManageController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
| ntt-sic/nova | nova/api/openstack/compute/contrib/flavormanage.py | Python | apache-2.0 | 3,894 |
# Copyright (c) 2013-2016 Cinchapi Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.tools import *
import os
import time
from subprocess import *
import signal
from . import test_data
from concourse import Concourse, Tag, Link, Diff, Operator, constants
from concourse.thriftapi.shared.ttypes import Type
from concourse.utils import python_to_thrift
import ujson
from tests import ignore
import socket
class IntegrationBaseTest(object):
"""
Base class for unit tests that use Mockcourse.
"""
port = None
process = None
client = None
expected_network_latency = 0.05
@classmethod
def setup_class(cls):
""" Fixture method to start Mockcourse and connect before the tests start to run.
"""
port = IntegrationBaseTest.get_open_port()
dir = os.path.dirname(os.path.realpath(__file__)) + '/../../mockcourse'
script = dir + '/mockcourse '+str(port)
cls.process = Popen(script, shell=True, preexec_fn=os.setsid)
cls.client = None
tries = 5
while tries > 0 and cls.client is None:
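            # Mockcourse may take a moment to boot, so retry the connection a
            # few times (sleeping between attempts) before giving up and
            # re-raising the last connection error.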
tries -= 1
time.sleep(1) # Wait for Mockcourse to start
try:
cls.client = Concourse.connect(port=port)
except RuntimeError as e:
if tries == 0:
raise e
else:
continue
@classmethod
def teardown_class(cls):
""" Fixture method to kill Mockcourse after all the tests have fun.
"""
os.killpg(cls.process.pid, signal.SIGTERM)
def tearDown(self):
"""" Logout" and clear all the data that the client stored in Mockcourse after each test. This ensures that the
environment for each test is clean and predicatable.
"""
self.client.logout() # Mockcourse logout simply clears the content of the datastore
def get_time_anchor(self):
""" Return a time anchor and sleep for long enough to account for network latency
"""
anchor = test_data.current_time_millis()
time.sleep(self.expected_network_latency)
return anchor
@staticmethod
def get_open_port():
"""Return an open port that is chosen by the OS
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("localhost", 0))
port = sock.getsockname()[1]
sock.close()
return port
class TestPythonClientDriver(IntegrationBaseTest):
"""
Implementations for standard unit tests that verify the Python client driver
conforms to the Concourse standard
"""
def __do_test_value_round_trip(self, value, ttype):
"""
Do the round_trip test logic for the specified value of the specified type
        :param value: the value to round trip through Concourse
        :param ttype: the expected thrift Type of the stored value
"""
key = test_data.random_string()
record = self.client.add(key=key, value=value)
stored = self.client.get(key=key, record=record)
assert_equal(value, stored)
assert_equal(python_to_thrift(stored).type, ttype)
def test_string_round_trip(self):
self.__do_test_value_round_trip(test_data.random_string(), Type.STRING)
def test_bool_round_trip(self):
self.__do_test_value_round_trip(test_data.random_bool(), Type.BOOLEAN)
def test_tag_round_trip(self):
self.__do_test_value_round_trip(Tag.create(test_data.random_string()), Type.TAG)
def test_link_round_trip(self):
self.__do_test_value_round_trip(Link.to(test_data.random_int()), Type.LINK)
def test_int_round_trip(self):
self.__do_test_value_round_trip(test_data.random_int(), Type.INTEGER)
self.__do_test_value_round_trip(2147483647, Type.INTEGER)
self.__do_test_value_round_trip(-2147483648, Type.INTEGER)
def test_long_round_trip(self):
self.__do_test_value_round_trip(2147483648, Type.LONG)
self.__do_test_value_round_trip(-2147483649, Type.LONG)
self.__do_test_value_round_trip(test_data.random_long(), Type.LONG)
def test_float_round_trip(self):
self.__do_test_value_round_trip(3.4028235E38, Type.DOUBLE)
self.__do_test_value_round_trip(-1.4E-45, Type.DOUBLE)
def test_abort(self):
self.client.stage()
key = test_data.random_string()
value = "some value"
record = 1
self.client.add(key=key, value=value, record=record)
self.client.abort()
assert_is_none(self.client.get(key=key, record=record))
def test_add_key_value(self):
key = test_data.random_string()
value = "static value"
record = self.client.add(key=key, value=value)
assert_is_not_none(record)
stored = self.client.get(key=key, record=record)
assert_equal(stored, value)
def test_add_key_value_record(self):
key = test_data.random_string()
value = "static value"
record = 17
assert_true(self.client.add(key=key, value=value, record=record))
stored = self.client.get(key=key, record=record)
assert_equal(stored, value)
def test_add_key_value_records(self):
key = test_data.random_string()
value = "static value"
records = [1, 2, 3]
result = self.client.add(key=key, value=value, records=records)
assert_true(isinstance(result, dict))
assert_true(result.get(1))
assert_true(result.get(2))
assert_true(result.get(3))
def test_audit_key_record(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1000
for value in values:
self.client.set(key, value, record)
audit = self.client.audit(key, record)
assert_equal(5, len(audit))
expected = 'ADD'
for k, v in audit.items():
assert_true(v.startswith(expected))
expected = 'REMOVE' if expected == 'ADD' else 'ADD'
def test_audit_key_record_start(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1001
for value in values:
self.client.set(key, value, record)
start = self.client.time()
values = [4, 5, 6]
for value in values:
self.client.set(key, value, record)
audit = self.client.audit(key, record, start=start)
assert_equal(6, len(audit))
def test_audit_key_record_start_end(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1002
for value in values:
self.client.set(key, value, record)
start = self.client.time()
values = [4, 5, 6]
for value in values:
self.client.set(key, value, record)
end = self.client.time()
values = [True, False]
for value in values:
self.client.set(key, value, record)
audit = self.client.audit(key, record, start=start, end=end)
assert_equal(6, len(audit))
def test_audit_key_record_startstr(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1001
for value in values:
self.client.set(key, value, record)
anchor = self.get_time_anchor()
values = [4, 5, 6]
for value in values:
self.client.set(key, value, record)
start = test_data.get_elapsed_millis_string(anchor)
audit = self.client.audit(key, record, start=start)
assert_equal(6, len(audit))
def test_audit_key_record_startstr_endstr(self):
key = test_data.random_string()
values = ["one", "two", "three"]
record = 1002
for value in values:
self.client.set(key, value, record)
start_anchor = self.get_time_anchor()
values = [4, 5, 6]
for value in values:
self.client.set(key, value, record)
end_anchor = self.get_time_anchor()
values = [True, False]
for value in values:
self.client.set(key, value, record)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
audit = self.client.audit(key, record, start=start, end=end)
assert_equal(6, len(audit))
def test_audit_record(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "foo"
record = 1002
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
audit = self.client.audit(record)
assert_equal(3, len(audit))
def test_audit_record_start(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "bar"
record = 344
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
start = self.client.time()
self.client.remove(key1, value, record)
self.client.remove(key2, value, record)
self.client.remove(key3, value, record)
audit = self.client.audit(record, start=start)
assert_equal(3, len(audit))
def test_audit_record_start_end(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "bar"
record = 344
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
start = self.client.time()
self.client.remove(key1, value, record)
self.client.remove(key2, value, record)
self.client.remove(key3, value, record)
end = self.client.time()
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
audit = self.client.audit(record, start=start, end=end)
assert_equal(3, len(audit))
def test_audit_record_startstr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "bar"
record = 344
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
anchor = self.get_time_anchor()
self.client.remove(key1, value, record)
self.client.remove(key2, value, record)
self.client.remove(key3, value, record)
start = test_data.get_elapsed_millis_string(anchor)
audit = self.client.audit(record, start=start)
assert_equal(3, len(audit))
def test_audit_record_startstr_endstr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value = "bar"
record = 344
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
start_anchor = self.get_time_anchor()
self.client.remove(key1, value, record)
self.client.remove(key2, value, record)
self.client.remove(key3, value, record)
end_anchor = self.get_time_anchor()
self.client.add(key1, value, record)
self.client.add(key2, value, record)
self.client.add(key3, value, record)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
audit = self.client.audit(record, start=start, end=end)
assert_equal(3, len(audit))
def test_browse_key(self):
key = test_data.random_string()
value = 10
self.client.add(key, value, [1, 2, 3])
value = test_data.random_string()
self.client.add(key, value, [10, 20, 30])
data = self.client.browse(key)
assert_equal([1, 2, 3], data.get(10))
assert_equal([20, 10, 30], data.get(value))
def test_browse_keys(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value1 = "A"
value2 = "B"
value3 = "C"
record1 = 1
record2 = 2
record3 = 3
self.client.add(key1, value1, record1)
self.client.add(key2, value2, record2)
self.client.add(key3, value3, record3)
data = self.client.browse([key1, key2, key3])
assert_equal({value1: [record1]}, data.get(key1))
assert_equal({value2: [record2]}, data.get(key2))
assert_equal({value3: [record3]}, data.get(key3))
def test_browse_keys_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value1 = "A"
value2 = "B"
value3 = "C"
record1 = 1
record2 = 2
record3 = 3
self.client.add(key1, value1, record1)
self.client.add(key2, value2, record2)
self.client.add(key3, value3, record3)
time = self.client.time()
self.client.add(key1, "Foo")
self.client.add(key2, "Foo")
self.client.add(key3, "Foo")
data = self.client.browse([key1, key2, key3], time=time)
assert_equal({value1: [record1]}, data.get(key1))
assert_equal({value2: [record2]}, data.get(key2))
assert_equal({value3: [record3]}, data.get(key3))
def test_browse_key_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value1 = "A"
value2 = "B"
value3 = "C"
record1 = 1
record2 = 2
record3 = 3
self.client.add(key1, value1, record1)
self.client.add(key2, value2, record2)
self.client.add(key3, value3, record3)
ts = test_data.get_elapsed_millis_string(self.get_time_anchor())
data = self.client.browse([key1, key2, key3], time=ts)
assert_equal({value1: [record1]}, data.get(key1))
assert_equal({value2: [record2]}, data.get(key2))
assert_equal({value3: [record3]}, data.get(key3))
@ignore
def test_browse_keys_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
key3 = test_data.random_string()
value1 = "A"
value2 = "B"
value3 = "C"
record1 = 1
record2 = 2
record3 = 3
self.client.add(key1, value1, record1)
self.client.add(key2, value2, record2)
self.client.add(key3, value3, record3)
anchor = self.get_time_anchor()
self.client.add(key1, "D", record1)
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.browse([key1, key2, key3], time=ts)
assert_equal({value1: [record1]}, data.get(key1))
assert_equal({value2: [record2]}, data.get(key2))
assert_equal({value3: [record3]}, data.get(key3))
def test_browse_key_time(self):
key = test_data.random_string()
value = 10
self.client.add(key, value, [1, 2, 3])
value = test_data.random_string()
self.client.add(key, value, [10, 20, 30])
timestamp = self.client.time()
self.client.add(key=key, value=True)
data = self.client.browse(key, timestamp)
assert_equal([1, 2, 3], data.get(10))
assert_equal([20, 10, 30], data.get(value))
def test_chronologize_key_record(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
self.client.remove(key, 1, record)
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
data = self.client.chronologize(key, record)
assert_equal([[1], [1, 2], [1, 2, 3], [2, 3], [3]], list(data.values()))
def test_chronologize_key_record_start(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
start = self.client.time()
self.client.remove(key, 1, record)
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
data = self.client.chronologize(key, record, time=start)
assert_equal([[2, 3], [3]], list(data.values()))
def test_chronologize_key_record_start_end(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
start = self.client.time()
self.client.remove(key, 1, record)
end = self.client.time()
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
data = self.client.chronologize(key, record, timestamp=start, end=end)
assert_equal([[2, 3]], list(data.values()))
def test_chronologize_key_record_startstr(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
anchor = self.get_time_anchor()
self.client.remove(key, 1, record)
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
start = test_data.get_elapsed_millis_string(anchor)
data = self.client.chronologize(key, record, time=start)
assert_equal([[2, 3], [3]], list(data.values()))
def test_chronologize_key_record_startstr_endstr(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
start_anchor = self.get_time_anchor()
self.client.remove(key, 1, record)
end_anchor = self.get_time_anchor()
self.client.remove(key, 2, record)
self.client.remove(key, 3, record)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
data = self.client.chronologize(key, record, timestamp=start, end=end)
assert_equal([[2, 3]], list(data.values()))
def test_clear_key_record(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
self.client.add(key, 2, record)
self.client.add(key, 3, record)
self.client.clear(key=key, record=record)
data = self.client.select(key=key, record=record)
assert_equal([], data)
def test_clear_key_records(self):
key = test_data.random_string()
records = [1, 2, 3]
self.client.add(key, 1, records)
self.client.add(key, 2, records)
self.client.add(key, 3, records)
self.client.clear(key=key, records=records)
data = self.client.select(key=key, records=records)
assert_equal({}, data)
def test_clear_keys_record(self):
key1 = test_data.random_string(6)
key2 = test_data.random_string(7)
key3 = test_data.random_string(8)
record = test_data.random_long()
self.client.add(key1, 1, record)
self.client.add(key2, 2, record)
self.client.add(key3, 3, record)
self.client.clear(keys=[key1, key2, key3], record=record)
data = self.client.select(keys=[key1, key2, key3], record=record)
assert_equal({}, data)
def test_clear_keys_records(self):
data = {
'a': 'A',
'b': 'B',
'c': ['C', True],
'd': 'D'
}
records = [1, 2, 3]
self.client.insert(data=data, records=records)
self.client.clear(keys=['a', 'b', 'c'], records=records)
data = self.client.get(key='d', records=records)
assert_equal({
1: 'D',
2: 'D',
3: 'D'
}, data)
def test_clear_record(self):
data = {
'a': 'A',
'b': 'B',
'c': ['C', True]
}
record = next(iter(self.client.insert(data)))
self.client.clear(record=record)
data = self.client.select(record=record)
assert_equal({}, data)
def test_clear_records(self):
data = {
'a': 'A',
'b': 'B',
'c': ['C', True],
'd': 'D'
}
records = [1, 2, 3]
self.client.insert(data=data, records=records)
self.client.clear(records=records)
data = self.client.select(records=records)
assert_equal({1: {}, 2: {}, 3: {}}, data)
def test_commit(self):
self.client.stage()
record = self.client.add("name", "jeff nelson")
self.client.commit()
assert_equal(['name'], list(self.client.describe(record)))
def test_describe_record(self):
self.client.set('name', 'tom brady', 1)
self.client.set('age', 100, 1)
self.client.set('team', 'new england patriots', 1)
keys = self.client.describe(1)
assert_equals(['age', 'name', 'team'], keys)
def test_describe_record_time(self):
self.client.set('name', 'tom brady', 1)
self.client.set('age', 100, 1)
self.client.set('team', 'new england patriots', 1)
timestamp = self.client.time()
self.client.clear('name', 1)
keys = self.client.describe(1, time=timestamp)
assert_equals(['age', 'name', 'team'], keys)
def test_describe_record_timestr(self):
self.client.set('name', 'tom brady', 1)
self.client.set('age', 100, 1)
self.client.set('team', 'new england patriots', 1)
anchor = self.get_time_anchor()
self.client.clear('name', 1)
timestamp = test_data.get_elapsed_millis_string(anchor)
keys = self.client.describe(1, time=timestamp)
assert_equals(['age', 'name', 'team'], keys)
def test_describe_records(self):
records = [1, 2, 3]
self.client.set('name', 'tom brady', records)
self.client.set('age', 100, records)
self.client.set('team', 'new england patriots', records)
keys = self.client.describe(records)
assert_equals(['age', 'name', 'team'], keys[1])
assert_equals(['age', 'name', 'team'], keys[2])
assert_equals(['age', 'name', 'team'], keys[3])
def test_describe_records_time(self):
records = [1, 2, 3]
self.client.set('name', 'tom brady', records)
self.client.set('age', 100, records)
self.client.set('team', 'new england patriots', records)
timestamp = self.client.time()
self.client.clear(records=records)
keys = self.client.describe(records, timestamp=timestamp)
assert_equals(['age', 'name', 'team'], keys[1])
assert_equals(['age', 'name', 'team'], keys[2])
assert_equals(['age', 'name', 'team'], keys[3])
def test_describe_records_timestr(self):
records = [1, 2, 3]
self.client.set('name', 'tom brady', records)
self.client.set('age', 100, records)
self.client.set('team', 'new england patriots', records)
anchor = self.get_time_anchor()
self.client.clear(records=records)
timestamp = test_data.get_elapsed_millis_string(anchor)
keys = self.client.describe(records, timestamp=timestamp)
assert_equals(['age', 'name', 'team'], keys[1])
assert_equals(['age', 'name', 'team'], keys[2])
assert_equals(['age', 'name', 'team'], keys[3])
def test_diff_key_record_start(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
start = self.client.time()
self.client.add(key, 2, record)
self.client.remove(key, 1, record)
diff = self.client.diff(key, record, start)
assert_equal([2], diff.get(Diff.ADDED))
assert_equal([1], diff.get(Diff.REMOVED))
def test_diff_key_record_startstr(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
anchor = self.get_time_anchor()
self.client.add(key, 2, record)
self.client.remove(key, 1, record)
start = test_data.get_elapsed_millis_string(anchor)
diff = self.client.diff(key, record, start)
assert_equal([2], diff.get(Diff.ADDED))
assert_equal([1], diff.get(Diff.REMOVED))
def test_diff_key_record_start_end(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
start = self.client.time()
self.client.add(key, 2, record)
self.client.remove(key, 1, record)
end = self.client.time()
self.client.set(key, 3, record)
diff = self.client.diff(key, record, start, end)
assert_equal([2], diff.get(Diff.ADDED))
assert_equal([1], diff.get(Diff.REMOVED))
def test_diff_key_record_startstr_endstr(self):
key = test_data.random_string()
record = test_data.random_long()
self.client.add(key, 1, record)
start_anchor = self.get_time_anchor()
self.client.add(key, 2, record)
self.client.remove(key, 1, record)
end_anchor = self.get_time_anchor()
self.client.set(key, 3, record)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
diff = self.client.diff(key, record, start, end)
assert_equal([2], diff.get(Diff.ADDED))
assert_equal([1], diff.get(Diff.REMOVED))
def test_diff_key_start(self):
key = test_data.random_string()
self.client.add(key=key, value=1, record=1)
start = self.client.time()
self.client.add(key=key, value=2, record=1)
self.client.add(key=key, value=1, record=2)
self.client.add(key=key, value=3, record=3)
self.client.remove(key=key, value=1, record=2)
diff = self.client.diff(key=key, start=start)
assert_equal(2, len(diff.keys()))
diff2 = diff.get(2)
diff3 = diff.get(3)
assert_equal([1], diff2.get(Diff.ADDED))
assert_equal([3], diff3.get(Diff.ADDED))
assert_is_none(diff2.get(Diff.REMOVED))
assert_is_none(diff3.get(Diff.REMOVED))
def test_diff_key_startstr(self):
key = test_data.random_string()
self.client.add(key=key, value=1, record=1)
anchor = self.get_time_anchor()
self.client.add(key=key, value=2, record=1)
self.client.add(key=key, value=1, record=2)
self.client.add(key=key, value=3, record=3)
self.client.remove(key=key, value=1, record=2)
start = test_data.get_elapsed_millis_string(anchor)
diff = self.client.diff(key=key, start=start)
assert_equal(2, len(diff.keys()))
diff2 = diff.get(2)
diff3 = diff.get(3)
assert_equal([1], diff2.get(Diff.ADDED))
assert_equal([3], diff3.get(Diff.ADDED))
assert_is_none(diff2.get(Diff.REMOVED))
assert_is_none(diff3.get(Diff.REMOVED))
def test_diff_key_start_end(self):
key = test_data.random_string()
self.client.add(key=key, value=1, record=1)
start = self.client.time()
self.client.add(key=key, value=2, record=1)
self.client.add(key=key, value=1, record=2)
self.client.add(key=key, value=3, record=3)
self.client.remove(key=key, value=1, record=2)
end = self.client.time()
self.client.add(key=key, value=4, record=1)
diff = self.client.diff(key=key, start=start, end=end)
assert_equal(2, len(diff.keys()))
diff2 = diff.get(2)
diff3 = diff.get(3)
assert_equal([1], diff2.get(Diff.ADDED))
assert_equal([3], diff3.get(Diff.ADDED))
assert_is_none(diff2.get(Diff.REMOVED))
assert_is_none(diff3.get(Diff.REMOVED))
def test_diff_key_startstr_endstr(self):
key = test_data.random_string()
self.client.add(key=key, value=1, record=1)
start_anchor = self.get_time_anchor()
self.client.add(key=key, value=2, record=1)
self.client.add(key=key, value=1, record=2)
self.client.add(key=key, value=3, record=3)
self.client.remove(key=key, value=1, record=2)
end_anchor = self.get_time_anchor()
self.client.add(key=key, value=4, record=1)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
diff = self.client.diff(key=key, start=start, end=end)
assert_equal(2, len(diff.keys()))
diff2 = diff.get(2)
diff3 = diff.get(3)
assert_equal([1], diff2.get(Diff.ADDED))
assert_equal([3], diff3.get(Diff.ADDED))
assert_is_none(diff2.get(Diff.REMOVED))
assert_is_none(diff3.get(Diff.REMOVED))
def test_diff_record_start(self):
self.client.add(key="foo", value=1, record=1)
start = self.client.time()
self.client.set(key="foo", value=2, record=1)
self.client.add(key="bar", value=True, record=1)
diff = self.client.diff(record=1, time=start)
assert_equal([1], diff.get('foo').get(Diff.REMOVED))
assert_equal([2], diff.get('foo').get(Diff.ADDED))
assert_equal([True], diff.get('bar').get(Diff.ADDED))
def test_diff_record_startstr(self):
self.client.add(key="foo", value=1, record=1)
anchor = self.get_time_anchor()
self.client.set(key="foo", value=2, record=1)
self.client.add(key="bar", value=True, record=1)
start = test_data.get_elapsed_millis_string(anchor)
diff = self.client.diff(record=1, time=start)
assert_equal([1], diff.get('foo').get(Diff.REMOVED))
assert_equal([2], diff.get('foo').get(Diff.ADDED))
assert_equal([True], diff.get('bar').get(Diff.ADDED))
def test_diff_record_start_end(self):
self.client.add(key="foo", value=1, record=1)
start = self.client.time()
self.client.set(key="foo", value=2, record=1)
self.client.add(key="bar", value=True, record=1)
end = self.client.time()
self.client.set(key="car", value=100, record=1)
diff = self.client.diff(record=1, time=start, end=end)
assert_equal([1], diff.get('foo').get(Diff.REMOVED))
assert_equal([2], diff.get('foo').get(Diff.ADDED))
assert_equal([True], diff.get('bar').get(Diff.ADDED))
def test_diff_record_startstr_endstr(self):
self.client.add(key="foo", value=1, record=1)
start_anchor = self.get_time_anchor()
self.client.set(key="foo", value=2, record=1)
self.client.add(key="bar", value=True, record=1)
end_anchor = self.get_time_anchor()
self.client.set(key="car", value=100, record=1)
start = test_data.get_elapsed_millis_string(start_anchor)
end = test_data.get_elapsed_millis_string(end_anchor)
diff = self.client.diff(record=1, time=start, end=end)
assert_equal([1], diff.get('foo').get(Diff.REMOVED))
assert_equal([2], diff.get('foo').get(Diff.ADDED))
assert_equal([True], diff.get('bar').get(Diff.ADDED))
def test_find_ccl(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key+' > 3'))
assert_equal(list(range(4, 10)), records)
@raises(Exception)
def test_find_ccl_handle_parse_exception(self):
self.client.find(ccl="throw parse exception")
def test_find_key_operator_value(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3))
assert_equal(list(range(4, 10)), records)
def test_find_key_operator_values(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6]))
assert_equal([3, 4, 5], records)
def test_find_key_operator_value_time(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
ts = self.client.time()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts))
assert_equal(list(range(4, 10)), records)
def test_find_key_operator_value_timestr(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
anchor = self.get_time_anchor()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
ts = test_data.get_elapsed_millis_string(anchor)
records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts))
assert_equal(list(range(4, 10)), records)
def test_find_key_operator_values_time(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
ts = self.client.time()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts))
assert_equal([3, 4, 5], records)
def test_find_key_operator_values_timestr(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
anchor = self.get_time_anchor()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
ts = test_data.get_elapsed_millis_string(anchor)
records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts))
assert_equal([3, 4, 5], records)
def test_find_key_operatorstr_values_time(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
ts = self.client.time()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
records = list(self.client.find(key=key, operator="bw", values=[3, 6], time=ts))
assert_equal([3, 4, 5], records)
def test_find_key_operatorstr_values(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key=key, operator="bw", values=[3, 6]))
assert_equal([3, 4, 5], records)
def test_find_key_operatorstr_values_timestr(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
anchor = self.get_time_anchor()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
ts = test_data.get_elapsed_millis_string(anchor)
records = list(self.client.find(key=key, operator="bw", values=[3, 6], time=ts))
assert_equal([3, 4, 5], records)
def test_find_key_operatorstr_value(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
records = list(self.client.find(key=key, operator="gt", value=3))
assert_equal(list(range(4, 10)), records)
def test_find_key_operatorstr_value_time(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
ts = self.client.time()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
records = list(self.client.find(key=key, operator="gt", value=3, time=ts))
assert_equal(list(range(4, 10)), records)
def test_find_key_operatorstr_value_timestr(self):
key = test_data.random_string()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n)
anchor = self.get_time_anchor()
for n in range(0, 10):
self.client.add(key=key, value=n, record=n+1)
ts = test_data.get_elapsed_millis_string(anchor)
records = list(self.client.find(key=key, operator="gt", value=3, time=ts))
assert_equal(list(range(4, 10)), records)
def test_get_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
ccl = key2 + ' = 10'
data = self.client.get(ccl=ccl)
expected = {
key1: 3,
key2: 10
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_get_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
ts = self.client.time()
self.client.set(key=key2, value=11, records=[record1, record2])
ccl = key2 + ' > 10'
data = self.client.get(ccl=ccl, time=ts)
expected = {
key1: 3,
key2: 10
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_get_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
anchor = self.get_time_anchor()
self.client.set(key=key2, value=11, records=[record1, record2])
ccl = key2 + ' > 10'
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(ccl=ccl, time=ts)
expected = {
key1: 3,
key2: 10
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_get_key_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ccl = key2 + ' = 10'
data = self.client.get(key=key1, ccl=ccl)
expected = {
record1: 3,
record2: 4
}
assert_equal(expected, data)
def test_get_keys_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ccl = key2 + ' = 10'
data = self.client.get(keys=[key1, key2], ccl=ccl)
expected = {
record1: {key1: 3, key2: 10},
record2: {key1: 4, key2: 10},
}
assert_equal(expected, data)
def test_get_key_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.get(key=key1, ccl=ccl, time=ts)
expected = {
record1: 3,
record2: 4
}
assert_equal(expected, data)
def test_get_keys_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.get(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: 3, key2: 10},
record2: {key1: 4, key2: 10},
}
assert_equal(expected, data)
def test_get_key_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(key=key1, ccl=ccl, time=ts)
expected = {
record1: 3,
record2: 4
}
assert_equal(expected, data)
def test_get_keys_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: 3, key2: 10},
record2: {key1: 4, key2: 10},
}
assert_equal(expected, data)
def test_get_key_record(self):
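        # get returns a single value for the key (the most recently added one), unlike select which returns them all.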
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
assert_equal(3, self.client.get(key='foo', record=1))
def test_get_key_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
ts = self.client.time()
self.client.add('foo', 4, 1)
assert_equal(3, self.client.get(key='foo', record=1, time=ts))
def test_get_key_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 4, 1)
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal(3, self.client.get(key='foo', record=1, time=ts))
def test_get_key_records(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
assert_equal({
1: 3,
2: 3,
3: 3
}, self.client.get(key='foo', record=[1, 2, 3]))
def test_get_key_records_time(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
ts = self.client.time()
self.client.add('foo', 4, [1, 2, 3])
assert_equal({
1: 3,
2: 3,
3: 3
}, self.client.get(key='foo', record=[1, 2, 3], time=ts))
def test_get_key_records_timestr(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
anchor = self.get_time_anchor()
self.client.add('foo', 4, [1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal({
1: 3,
2: 3,
3: 3
}, self.client.get(key='foo', record=[1, 2, 3], time=ts))
def test_get_keys_record(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
data = self.client.get(keys=['foo', 'bar'], record=1)
expected = {
'foo': 2,
'bar': 2
}
assert_equal(expected, data)
def test_get_keys_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
ts = self.client.time()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
data = self.client.get(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': 2,
'bar': 2
}
assert_equal(expected, data)
def test_get_keys_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': 2,
'bar': 2
}
assert_equal(expected, data)
def test_get_keys_records_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': 2,
'bar': 2
}
assert_equal({
1: expected,
2: expected
}, data)
def test_get_keys_records_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': 2,
'bar': 2
}
assert_equal({
1: expected,
2: expected
}, data)
def test_get_keys_records(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.get(keys=['foo', 'bar'], records=[1, 2])
expected = {
'foo': 2,
'bar': 2
}
assert_equal({
1: expected,
2: expected
}, data)
def test_insert_dict(self):
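        # insert creates brand new records and returns their ids, hence the [0] to grab the single generated record.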
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
record = self.client.insert(data=data)[0]
assert_equal('a', self.client.get(key='string', record=record))
assert_equal(1, self.client.get(key='int', record=record))
assert_equal(3.14, self.client.get(key='double', record=record))
assert_equal(True, self.client.get(key='bool', record=record))
assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
def test_insert_dicts(self):
data = [
{
'foo': 1
},
{
'foo': 2
},
{
'foo': 3
}
]
records = self.client.insert(data=data)
assert_equal(len(data), len(records))
def test_insert_json(self):
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
data = ujson.dumps(data)
record = self.client.insert(data=data)[0]
assert_equal('a', self.client.get(key='string', record=record))
assert_equal(1, self.client.get(key='int', record=record))
assert_equal(3.14, self.client.get(key='double', record=record))
assert_equal(True, self.client.get(key='bool', record=record))
assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
def test_insert_json_list(self):
data = [
{
'foo': 1
},
{
'foo': 2
},
{
'foo': 3
}
]
count = len(data)
data = ujson.dumps(data)
records = self.client.insert(data=data)
assert_equal(count, len(records))
def test_insert_dict_record(self):
record = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
result = self.client.insert(data=data, record=record)
assert_true(result)
assert_equal('a', self.client.get(key='string', record=record))
assert_equal(1, self.client.get(key='int', record=record))
assert_equal(3.14, self.client.get(key='double', record=record))
assert_equal(True, self.client.get(key='bool', record=record))
assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
def test_insert_json_record(self):
record = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
data = ujson.dumps(data)
result = self.client.insert(data=data, record=record)
assert_true(result)
assert_equal('a', self.client.get(key='string', record=record))
assert_equal(1, self.client.get(key='int', record=record))
assert_equal(3.14, self.client.get(key='double', record=record))
assert_equal(True, self.client.get(key='bool', record=record))
assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
def test_insert_dict_records(self):
record1 = test_data.random_long()
record2 = test_data.random_long()
record3 = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
result = self.client.insert(data=data, records=[record1, record2, record3])
assert_true({
record1: True,
record2: True,
record3: True
}, result)
def test_insert_json_records(self):
record1 = test_data.random_long()
record2 = test_data.random_long()
record3 = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
data = ujson.dumps(data)
result = self.client.insert(data=data, records=[record1, record2, record3])
assert_true({
record1: True,
record2: True,
record3: True
}, result)
def test_inventory(self):
records = [1, 2, 3, 4, 5, 6, 7]
self.client.add(key='foo', value=17, records=records)
assert_equal(records, self.client.inventory())
def test_jsonify_records(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
dump = self.client.jsonify(records=[record1, record2])
data = {
'int': [1],
'multi': [1, 2, 3, 4]
}
assert_equal([data, data], ujson.loads(dump))
def test_jsonify_records_identifier(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
dump = self.client.jsonify(records=[record1, record2], id=True)
data1 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 1
}
data2 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 2
}
assert_equal([data1, data2], ujson.loads(dump))
def test_jsonify_records_time(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
ts = self.client.time()
self.client.add('foo', 10, [record1, record2])
dump = self.client.jsonify(records=[record1, record2], time=ts)
data = {
'int': [1],
'multi': [1, 2, 3, 4]
}
assert_equal([data, data], ujson.loads(dump))
@ignore
def test_jsonify_records_timestr(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
anchor = self.get_time_anchor()
self.client.add('foo', 10, [record1, record2])
ts = test_data.get_elapsed_millis_string(anchor)
dump = self.client.jsonify(records=[record1, record2], time=ts)
data = {
'int': [1],
'multi': [1, 2, 3, 4]
}
assert_equal([data, data], ujson.loads(dump))
def test_jsonify_records_identifier_time(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
ts = self.client.time()
self.client.add(key='foo', value=True, records=[record1, record2])
dump = self.client.jsonify(records=[record1, record2], id=True, time=ts)
data1 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 1
}
data2 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 2
}
assert_equal([data1, data2], ujson.loads(dump))
def test_jsonify_records_identifier_timestr(self):
record1 = 1
record2 = 2
data = {
'int': 1,
'multi': [1, 2, 3, 4]
}
self.client.insert(data=data, records=[record1, record2])
anchor = self.get_time_anchor()
self.client.add(key='foo', value=True, records=[record1, record2])
ts = test_data.get_elapsed_millis_string(anchor)
dump = self.client.jsonify(records=[record1, record2], id=True, time=ts)
data1 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 1
}
data2 = {
'int': [1],
'multi': [1, 2, 3, 4],
constants.JSON_RESERVED_IDENTIFIER_NAME: 2
}
assert_equal([data1, data2], ujson.loads(dump))
def test_ping_record(self):
record = 1
assert_false(self.client.ping(record))
self.client.add(key='foo', value=1, record=record)
assert_true(self.client.ping(record))
self.client.clear(key='foo', record=record)
assert_false(self.client.ping(record))
def test_ping_records(self):
self.client.add(key='foo', value=1, records=[1, 2])
data = self.client.ping([1, 2, 3])
assert_equal({
1: True,
2: True,
3: False
}, data)
def test_remove_key_value_record(self):
key = 'foo'
value = 1
record = 1
assert_false(self.client.remove(key, value, record))
self.client.add(key, value, record)
assert_true(self.client.remove(key=key, record=record, value=value))
def test_remove_key_value_records(self):
key = 'foo'
value = 1
self.client.add(key, value, records=[1, 2])
data = self.client.remove(key, value, records=[1, 2, 3])
assert_equal({
1: True,
2: True,
3: False
}, data)
def test_revert_key_records_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(key='one', records=[1, 2, 3], time=ts)
data = self.client.select(key='one', record=[1, 2, 3])
assert_equal({
1: [1],
2: [1],
3: [1]
}, data)
def test_revert_key_records_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(key='one', records=[1, 2, 3], time=ts)
data = self.client.select(key='one', record=[1, 2, 3])
assert_equal({
1: [1],
2: [1],
3: [1]
}, data)
def test_revert_keys_records_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3])
data3 = {
'one': [1],
'two': [2],
'three': [3]
}
assert_equal({
1: data3,
2: data3,
3: data3
}, data)
def test_revert_keys_records_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3])
data3 = {
'one': [1],
'two': [2],
'three': [3]
}
assert_equal({
1: data3,
2: data3,
3: data3
}, data)
def test_revert_keys_record_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(key=['one', 'two', 'three'], records=1, time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=1)
assert_equal({
'one': [1],
'two': [2],
'three': [3]
}, data)
def test_revert_keys_record_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(key=['one', 'two', 'three'], records=1, time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=1)
assert_equal({
'one': [1],
'two': [2],
'three': [3]
}, data)
def test_revert_key_record_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(key='one', records=1, time=ts)
data = self.client.select(key='one', record=1)
assert_equal([1], data)
def test_revert_key_record_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(key='one', records=1, time=ts)
data = self.client.select(key='one', record=1)
assert_equal([1], data)
def test_search(self):
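        # Full-text search matches any value containing the query substring ("jef"), so records 1, 2 and 4 match but "jeremy" does not.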
self.client.add(key="name", value="jeff", record=1)
self.client.add(key="name", value="jeffery", record=2)
self.client.add(key="name", value="jeremy", record=3)
self.client.add(key="name", value="ben jefferson", record=4)
records = self.client.search(key="name", query="jef")
assert_equal([1, 2, 4], records)
def test_select_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
ccl = key2 + ' = 10'
data = self.client.select(ccl=ccl)
expected = {
key1: [1, 2, 3],
key2: [10]
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_select_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
ts = self.client.time()
self.client.set(key=key2, value=11, records=[record1, record2])
ccl = key2 + ' > 10'
data = self.client.select(ccl=ccl, time=ts)
expected = {
key1: [1, 2, 3],
key2: [10]
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_select_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
anchor = self.get_time_anchor()
self.client.set(key=key2, value=11, records=[record1, record2])
ccl = key2 + ' > 10'
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(ccl=ccl, time=ts)
expected = {
key1: [1, 2, 3],
key2: [10]
}
assert_equal(data.get(record1), expected)
assert_equal(data.get(record2), expected)
def test_select_key_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ccl = key2 + ' = 10'
data = self.client.select(key=key1, ccl=ccl)
expected = {
record1: [1, 2, 3],
record2: [1, 2, 3, 4]
}
assert_equal(expected, data)
def test_select_keys_ccl(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ccl = key2 + ' = 10'
data = self.client.select(keys=[key1, key2], ccl=ccl)
expected = {
record1: {key1: [1, 2, 3], key2: [10]},
record2: {key1: [1, 2, 3, 4], key2: [10]},
}
assert_equal(expected, data)
def test_select_key_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.select(key=key1, ccl=ccl, time=ts)
expected = {
record1: [1, 2, 3],
record2: [1, 2, 3, 4]
}
assert_equal(expected, data)
def test_select_keys_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.select(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: [1, 2, 3], key2: [10]},
record2: {key1: [1, 2, 3, 4], key2: [10]},
}
assert_equal(expected, data)
def test_select_key_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(key=key1, ccl=ccl, time=ts)
expected = {
record1: [1, 2, 3],
record2: [1, 2, 3, 4]
}
assert_equal(expected, data)
def test_select_keys_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: [1, 2, 3], key2: [10]},
record2: {key1: [1, 2, 3, 4], key2: [10]},
}
assert_equal(expected, data)
def test_select_key_record(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
assert_equal([1, 2, 3], self.client.select(key='foo', record=1))
def test_select_key_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
ts = self.client.time()
self.client.add('foo', 4, 1)
assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts))
def test_select_key_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 4, 1)
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts))
def test_select_key_records(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
assert_equal({
1: [1, 2, 3],
2: [1, 2, 3],
3: [1, 2, 3]
}, self.client.select(key='foo', record=[1, 2, 3]))
def test_select_key_records_time(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
ts = self.client.time()
self.client.add('foo', 4, [1, 2, 3])
assert_equal({
1: [1, 2, 3],
2: [1, 2, 3],
3: [1, 2, 3]
}, self.client.select(key='foo', record=[1, 2, 3], time=ts))
def test_select_key_records_timestr(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
anchor = self.get_time_anchor()
self.client.add('foo', 4, [1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal({
1: [1, 2, 3],
2: [1, 2, 3],
3: [1, 2, 3]
}, self.client.select(key='foo', record=[1, 2, 3], time=ts))
def test_select_keys_record(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
data = self.client.select(keys=['foo', 'bar'], record=1)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_keys_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
ts = self.client.time()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
data = self.client.select(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_keys_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_keys_records_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_keys_records_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_keys_records(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.select(keys=['foo', 'bar'], records=[1, 2])
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_record(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.select(record=1)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_record_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.select(record=2, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_record_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(record=2, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_records(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.select(records=[1, 2])
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_records_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
        data = self.client.select(records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_records_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
        data = self.client.select(records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_set_key_value(self):
key = "foo"
value = 1
record = self.client.set(key=key, value=value)
data = self.client.select(record=record)
assert_equal({
'foo': [1]
}, data)
def test_set_key_value_record(self):
key = "foo"
value = 1
record = 1
self.client.add(key=key, value=2, record=record)
self.client.add(key=key, value=2, record=record)
self.client.set(key=key, value=value, record=record)
data = self.client.select(record=record)
assert_equal({
'foo': [1]
}, data)
def test_set_key_value_records(self):
key = "foo"
value = 1
records = [1, 2, 3]
self.client.add(key=key, value=2, record=records)
self.client.add(key=key, value=2, record=records)
self.client.set(key=key, value=value, record=records)
data = self.client.select(record=records)
expected = {
'foo': [1]
}
assert_equal({
1: expected,
2: expected,
3: expected
}, data)
def test_stage(self):
assert_is_none(self.client.transaction)
self.client.stage()
assert_is_not_none(self.client.transaction)
self.client.abort()
def test_time(self):
assert_true(isinstance(self.client.time(), int))
def test_time_phrase(self):
assert_true(isinstance(self.client.time("3 seconds ago"), int))
def test_verify_and_swap(self):
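        # verify_and_swap is an atomic compare-and-swap: the replacement is only written when the expected value is present.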
self.client.add("foo", 2, 2)
assert_false(self.client.verify_and_swap(key='foo', expected=1, record=2, replacement=3))
assert_true(self.client.verify_and_swap(key='foo', expected=2, record=2, replacement=3))
assert_equal(3, self.client.get(key='foo', record=2))
def test_verify_or_set(self):
self.client.add("foo", 2, 2)
self.client.verify_or_set(key='foo', value=3, record=2)
assert_equal(3, self.client.get(key='foo', record=2))
def test_verify_key_value_record(self):
self.client.add('name', 'jeff', 1)
self.client.add('name', 'jeffery', 1)
self.client.add('name', 'bob', 1)
assert_true(self.client.verify('name', 'jeff', 1))
self.client.remove('name', 'jeff', 1)
assert_false(self.client.verify('name', 'jeff', 1))
def test_verify_key_value_record_time(self):
self.client.add('name', 'jeff', 1)
self.client.add('name', 'jeffery', 1)
self.client.add('name', 'bob', 1)
ts = self.client.time()
self.client.remove('name', 'jeff', 1)
assert_true(self.client.verify('name', 'jeff', 1, time=ts))
def test_verify_key_value_record_timestr(self):
self.client.add('name', 'jeff', 1)
self.client.add('name', 'jeffery', 1)
self.client.add('name', 'bob', 1)
anchor = self.get_time_anchor()
self.client.remove('name', 'jeff', 1)
ts = test_data.get_elapsed_millis_string(anchor)
assert_true(self.client.verify('name', 'jeff', 1, time=ts))
def test_link_key_source_destination(self):
assert_true(self.client.link(key='friends', source=1, destination=2))
assert_equal(Link.to(2), self.client.get('friends', record=1))
def test_link_key_source_destinations(self):
assert_equal({
2: True,
3: True,
4: True
}, self.client.link(key='friends', source=1, destination=[2, 3, 4]))
def test_unlink_key_source_destination(self):
assert_true(self.client.link(key='friends', source=1, destination=2))
assert_true(self.client.unlink(key='friends', source=1, destination=2))
def test_unlink_key_source_destinations(self):
assert_true(self.client.link(key='friends', source=1, destination=2))
assert_equal({
2: True,
3: False
}, self.client.unlink(key='friends', source=1, destination=[2, 3]))
def test_find_or_add_key_value(self):
record = self.client.find_or_add("age", 23)
assert_equal(23, self.client.get("age", record))
def test_find_or_insert_ccl_json(self):
data = {
'name': 'jeff nelson'
}
data = ujson.dumps(data)
record = self.client.find_or_insert(criteria="age > 10", data=data)
assert_equal('jeff nelson', self.client.get("name", record))
def test_find_or_insert_ccl_dict(self):
data = {
'name': 'jeff nelson'
}
record = self.client.find_or_insert(criteria="age > 10", data=data)
assert_equal('jeff nelson', self.client.get("name", record))
def test_insert_dict_with_link(self):
data = {
'foo': Link.to(1)
}
record = self.client.insert(data=data)[0]
assert_equal(Link.to(1), self.client.get(key='foo', record=record))
def test_insert_dict_with_resolvable_link(self):
record1 = self.client.add('foo', 1)
record2 = self.client.insert(data={
'foo': Link.to_where('foo = 1')
})[0]
        assert_equal(Link.to(record1), self.client.get(key='foo', record=record2))
| remiemalik/concourse | concourse-driver-python/tests/integration_tests.py | Python | apache-2.0 | 87,056 |
"""Support for OpenWRT (ubus) routers."""
import logging
import re
from openwrt.ubus import Ubus
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_DHCP_SOFTWARE = "dhcp_software"
DEFAULT_DHCP_SOFTWARE = "dnsmasq"
DHCP_SOFTWARES = ["dnsmasq", "odhcpd", "none"]
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_DHCP_SOFTWARE, default=DEFAULT_DHCP_SOFTWARE): vol.In(
DHCP_SOFTWARES
),
}
)
def get_scanner(hass, config):
"""Validate the configuration and return an ubus scanner."""
dhcp_sw = config[DOMAIN][CONF_DHCP_SOFTWARE]
if dhcp_sw == "dnsmasq":
scanner = DnsmasqUbusDeviceScanner(config[DOMAIN])
elif dhcp_sw == "odhcpd":
scanner = OdhcpdUbusDeviceScanner(config[DOMAIN])
else:
scanner = UbusDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
def _refresh_on_access_denied(func):
"""If remove rebooted, it lost our session so rebuild one and try again."""
def decorator(self, *args, **kwargs):
"""Wrap the function to refresh session_id on PermissionError."""
try:
return func(self, *args, **kwargs)
except PermissionError:
_LOGGER.warning(
"Invalid session detected."
" Trying to refresh session_id and re-run RPC"
)
self.ubus.connect()
return func(self, *args, **kwargs)
return decorator
class UbusDeviceScanner(DeviceScanner):
"""
This class queries a wireless router running OpenWrt firmware.
Adapted from Tomato scanner.
"""
def __init__(self, config):
"""Initialize the scanner."""
host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.last_results = {}
self.url = f"http://{host}/ubus"
self.ubus = Ubus(self.url, self.username, self.password)
self.hostapd = []
self.mac2name = None
self.success_init = self.ubus.connect() is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def _generate_mac2name(self):
"""Return empty MAC to name dict. Overridden if DHCP server is set."""
self.mac2name = {}
@_refresh_on_access_denied
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if self.mac2name is None:
self._generate_mac2name()
if self.mac2name is None:
# Generation of mac2name dictionary failed
return None
name = self.mac2name.get(device.upper(), None)
return name
@_refresh_on_access_denied
def _update_info(self):
"""Ensure the information from the router is up to date.
Returns boolean if scanning successful.
"""
if not self.success_init:
return False
_LOGGER.info("Checking hostapd")
if not self.hostapd:
hostapd = self.ubus.get_hostapd()
self.hostapd.extend(hostapd.keys())
self.last_results = []
results = 0
# for each access point
for hostapd in self.hostapd:
if result := self.ubus.get_hostapd_clients(hostapd):
results = results + 1
# Check for each device is authorized (valid wpa key)
for key in result["clients"].keys():
device = result["clients"][key]
if device["authorized"]:
self.last_results.append(key)
return bool(results)
class DnsmasqUbusDeviceScanner(UbusDeviceScanner):
"""Implement the Ubus device scanning for the dnsmasq DHCP server."""
def __init__(self, config):
"""Initialize the scanner."""
super().__init__(config)
self.leasefile = None
def _generate_mac2name(self):
if self.leasefile is None:
if result := self.ubus.get_uci_config("dhcp", "dnsmasq"):
values = result["values"].values()
self.leasefile = next(iter(values))["leasefile"]
else:
return
result = self.ubus.file_read(self.leasefile)
if result:
self.mac2name = {}
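            # A dnsmasq lease line looks like "<expiry> <mac> <ip> <hostname> <client-id>",
            # so field 1 is the MAC address and field 3 is the hostname.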
for line in result["data"].splitlines():
hosts = line.split(" ")
self.mac2name[hosts[1].upper()] = hosts[3]
else:
# Error, handled in the ubus.file_read()
return
class OdhcpdUbusDeviceScanner(UbusDeviceScanner):
"""Implement the Ubus device scanning for the odhcp DHCP server."""
def _generate_mac2name(self):
if result := self.ubus.get_dhcp_method("ipv4leases"):
self.mac2name = {}
for device in result["device"].values():
for lease in device["leases"]:
mac = lease["mac"] # mac = aabbccddeeff
# Convert it to expected format with colon
mac = ":".join(mac[i : i + 2] for i in range(0, len(mac), 2))
self.mac2name[mac.upper()] = lease["hostname"]
else:
# Error, handled in the ubus.get_dhcp_method()
return
| jawilson/home-assistant | homeassistant/components/ubus/device_tracker.py | Python | apache-2.0 | 5,843 |
"""Support for Switchbot devices."""
from asyncio import Lock
import switchbot # pylint: disable=import-error
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_SENSOR_TYPE, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from .const import (
ATTR_BOT,
ATTR_CURTAIN,
BTLE_LOCK,
COMMON_OPTIONS,
CONF_RETRY_COUNT,
CONF_RETRY_TIMEOUT,
CONF_SCAN_TIMEOUT,
CONF_TIME_BETWEEN_UPDATE_COMMAND,
DATA_COORDINATOR,
DEFAULT_RETRY_COUNT,
DEFAULT_RETRY_TIMEOUT,
DEFAULT_SCAN_TIMEOUT,
DEFAULT_TIME_BETWEEN_UPDATE_COMMAND,
DOMAIN,
)
from .coordinator import SwitchbotDataUpdateCoordinator
PLATFORMS_BY_TYPE = {
ATTR_BOT: [Platform.SWITCH, Platform.SENSOR],
ATTR_CURTAIN: [Platform.COVER, Platform.BINARY_SENSOR, Platform.SENSOR],
}
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Switchbot from a config entry."""
hass.data.setdefault(DOMAIN, {})
if not entry.options:
options = {
CONF_TIME_BETWEEN_UPDATE_COMMAND: DEFAULT_TIME_BETWEEN_UPDATE_COMMAND,
CONF_RETRY_COUNT: DEFAULT_RETRY_COUNT,
CONF_RETRY_TIMEOUT: DEFAULT_RETRY_TIMEOUT,
CONF_SCAN_TIMEOUT: DEFAULT_SCAN_TIMEOUT,
}
hass.config_entries.async_update_entry(entry, options=options)
# Use same coordinator instance for all entities.
    # Uses BTLE advertisement data; all Switchbot devices in range are stored here.
if DATA_COORDINATOR not in hass.data[DOMAIN]:
# Check if asyncio.lock is stored in hass data.
# BTLE has issues with multiple connections,
        # so we use a lock to ensure that only one API request reaches it at a time:
if BTLE_LOCK not in hass.data[DOMAIN]:
hass.data[DOMAIN][BTLE_LOCK] = Lock()
if COMMON_OPTIONS not in hass.data[DOMAIN]:
hass.data[DOMAIN][COMMON_OPTIONS] = {**entry.options}
switchbot.DEFAULT_RETRY_TIMEOUT = hass.data[DOMAIN][COMMON_OPTIONS][
CONF_RETRY_TIMEOUT
]
# Store api in coordinator.
coordinator = SwitchbotDataUpdateCoordinator(
hass,
update_interval=hass.data[DOMAIN][COMMON_OPTIONS][
CONF_TIME_BETWEEN_UPDATE_COMMAND
],
api=switchbot,
retry_count=hass.data[DOMAIN][COMMON_OPTIONS][CONF_RETRY_COUNT],
scan_timeout=hass.data[DOMAIN][COMMON_OPTIONS][CONF_SCAN_TIMEOUT],
api_lock=hass.data[DOMAIN][BTLE_LOCK],
)
hass.data[DOMAIN][DATA_COORDINATOR] = coordinator
else:
coordinator = hass.data[DOMAIN][DATA_COORDINATOR]
await coordinator.async_config_entry_first_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
entry.async_on_unload(entry.add_update_listener(_async_update_listener))
hass.data[DOMAIN][entry.entry_id] = {DATA_COORDINATOR: coordinator}
sensor_type = entry.data[CONF_SENSOR_TYPE]
hass.config_entries.async_setup_platforms(entry, PLATFORMS_BY_TYPE[sensor_type])
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
sensor_type = entry.data[CONF_SENSOR_TYPE]
unload_ok = await hass.config_entries.async_unload_platforms(
entry, PLATFORMS_BY_TYPE[sensor_type]
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if len(hass.config_entries.async_entries(DOMAIN)) == 0:
hass.data.pop(DOMAIN)
return unload_ok
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
# Update entity options stored in hass.
if {**entry.options} != hass.data[DOMAIN][COMMON_OPTIONS]:
hass.data[DOMAIN][COMMON_OPTIONS] = {**entry.options}
hass.data[DOMAIN].pop(DATA_COORDINATOR)
await hass.config_entries.async_reload(entry.entry_id)
| home-assistant/home-assistant | homeassistant/components/switchbot/__init__.py | Python | apache-2.0 | 4,051 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemglobal_authenticationpolicy_binding(base_resource) :
""" Binding class showing the authenticationpolicy that can be bound to systemglobal.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._builtin = []
self.___count = 0
@property
def priority(self) :
ur"""The priority of the command policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority of the command policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def builtin(self) :
ur"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE.
"""
try :
return self._builtin
except Exception as e:
raise e
@builtin.setter
def builtin(self, builtin) :
ur"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE
"""
try :
self._builtin = builtin
except Exception as e:
raise e
@property
def policyname(self) :
ur"""The name of the command policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""The name of the command policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(systemglobal_authenticationpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.systemglobal_authenticationpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = systemglobal_authenticationpolicy_binding()
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = systemglobal_authenticationpolicy_binding()
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
ur""" Use this API to fetch a systemglobal_authenticationpolicy_binding resources.
"""
try :
obj = systemglobal_authenticationpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
ur""" Use this API to fetch filtered set of systemglobal_authenticationpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = systemglobal_authenticationpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
ur""" Use this API to count systemglobal_authenticationpolicy_binding resources configued on NetScaler.
"""
try :
obj = systemglobal_authenticationpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
ur""" Use this API to count the filtered set of systemglobal_authenticationpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = systemglobal_authenticationpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
class systemglobal_authenticationpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.systemglobal_authenticationpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.systemglobal_authenticationpolicy_binding = [systemglobal_authenticationpolicy_binding() for _ in range(length)]
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/system/systemglobal_authenticationpolicy_binding.py | Python | apache-2.0 | 6,831 |
"""
Drone.io badge generator.
Currently set up to work on Mac.
Requires Pillow.
"""
import os
from PIL import Image, ImageDraw, ImageFont
SIZE = (95, 18)
def hex_colour(hex):
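    """Convert a '#RRGGBB' (or 'RRGGBB') hex string into an (R, G, B) tuple."""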
if hex[0] == '#':
hex = hex[1:]
return (
int(hex[:2], 16),
int(hex[2:4], 16),
int(hex[4:6], 16),
)
BACKGROUND = hex_colour('#4A4A4A')
SUCCESS = hex_colour('#94B944')
WARNING = hex_colour('#E4A83C')
ERROR = hex_colour('#B10610')
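# Colour thresholds: 85% and above renders green, 45-84% amber, below 45% red.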
SUCCESS_CUTOFF = 85
WARNING_CUTOFF = 45
FONT = ImageFont.truetype(size=10, filename="/Library/Fonts/Arial.ttf")
FONT_SHADOW = hex_colour('#525252')
PADDING_TOP = 3
def build_image(percentage, colour):
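    """Render the 95x18 badge: grey 'coverage' label on the left, coloured percentage block on the right."""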
image = Image.new('RGB', SIZE, color=BACKGROUND)
drawing = ImageDraw.Draw(image)
drawing.rectangle([(55, 0), SIZE], colour, colour)
drawing.text((8, PADDING_TOP+1), 'coverage', font=FONT, fill=FONT_SHADOW)
drawing.text((7, PADDING_TOP), 'coverage', font=FONT)
drawing.text((63, PADDING_TOP+1), '%s%%' % percentage, font=FONT, fill=FONT_SHADOW)
drawing.text((62, PADDING_TOP), '%s%%' % percentage, font=FONT)
return image
os.chdir('_build')
for i in range(101):
filename = '%i.png' % i
file = open(filename, 'wb')
if i < WARNING_CUTOFF:
build_image(i, ERROR).save(file)
elif i < SUCCESS_CUTOFF:
build_image(i, WARNING).save(file)
else:
build_image(i, SUCCESS).save(file)
| 21strun/django-coverage | assets/badges/drone.io/badge_maker.py | Python | apache-2.0 | 1,424 |
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ThreadedComponent import threadedcomponent, threadedadaptivecommscomponent
import heapq
import time
class SchedulingComponentMixin(object):
"""
SchedulingComponent() -> new SchedulingComponent
Base class for a threadedcomponent with an inbuilt scheduler, allowing a
component to block until a scheduled event is ready or a message is received
on an inbox.
"""
Inboxes = {"inbox" : "Standard inbox for receiving data from other components",
"control" : "Standard inbox for receiving control messages from other components",
"event" : "Scheduled events which are ready to be processed"}
def __init__(self, **argd):
super(SchedulingComponentMixin, self).__init__(**argd)
self.eventQueue = []
def scheduleRel(self, message, delay, priority=1):
"""
Schedule an event to wake the component and send a message to the
"event" inbox after a delay.
"""
return self.scheduleAbs(message, time.time() + delay, priority)
def scheduleAbs(self, message, eventTime, priority=1):
"""
Schedule an event to wake the component and send a message to the
"event" inbox after at a specified time.
"""
event = eventTime, priority, message
heapq.heappush(self.eventQueue, event)
return event
def cancelEvent(self, event):
""" Remove a scheduled event from the scheduler """
self.eventQueue.remove(event)
heapq.heapify(self.eventQueue)
def eventReady(self):
""" Returns true if there is an event ready to be processed """
if self.eventQueue:
eventTime = self.eventQueue[0][0]
if time.time() >= eventTime:
return True
return False
def pause(self):
"""
Sleep until there is either an event ready or a message is received on
an inbox
"""
if self.eventReady():
self.signalEvent()
else:
if self.eventQueue:
eventTime = self.eventQueue[0][0]
super(SchedulingComponentMixin, self).pause(eventTime - time.time())
if self.eventReady():
self.signalEvent()
else:
super(SchedulingComponentMixin, self).pause()
def signalEvent(self):
"""
Put the event message of the earliest scheduled event onto the
component's "event" inbox and remove it from the scheduler.
"""
eventTime, priority, message = heapq.heappop(self.eventQueue)
#print "Signalling, late by:", (time.time() - eventTime)
if not self.inqueues["event"].full():
self.inqueues["event"].put(message)
class SchedulingComponent(SchedulingComponentMixin, threadedcomponent):
def __init__(self, **argd):
super(SchedulingComponent, self).__init__(**argd)
class SchedulingAdaptiveCommsComponent(SchedulingComponentMixin,
threadedadaptivecommscomponent):
def __init__(self, **argd):
super(SchedulingAdaptiveCommsComponent, self).__init__(**argd)
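# --- Illustrative usage (hedged sketch, not part of the original module) ---
# A minimal component that wakes itself once per second via the scheduler. The
# main()/dataReady()/recv()/send() calls follow the usual Axon threadedcomponent
# API; this sketch has not been run against this tree, so treat it as a guide only.
#
# class Ticker(SchedulingComponent):
#     def main(self):
#         self.scheduleRel("tick", delay=1.0)
#         while True:
#             self.pause()
#             if self.dataReady("event"):
#                 self.recv("event")
#                 self.send("tick", "outbox")
#                 self.scheduleRel("tick", delay=1.0)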
| sparkslabs/kamaelia_ | Sketches/JT/Jam/library/trunk/Axon/SchedulingComponent.py | Python | apache-2.0 | 3,988 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
  a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
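# Illustrative only (hedged sketch, not part of the public API): combining the
# helpers above to inspect a checkpoint. The directory name is a placeholder.
def _example_list_checkpoint_contents(ckpt_dir_or_file="/tmp/model_dir"):
  """Prints the name, shape and dtype of every variable in a checkpoint."""
  for name, shape in list_variables(ckpt_dir_or_file):
    value = load_variable(ckpt_dir_or_file, name)  # numpy ndarray copy
    print(name, shape, value.dtype)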
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
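# Illustrative only (hedged sketch): the polling loop described in the docstring
# above. The directory, interval and timeout values are placeholder assumptions.
def _example_poll_checkpoints(checkpoint_dir="/tmp/train_dir"):
  """Logs each new checkpoint; gives up after one 10-minute quiet period."""
  for ckpt_path in checkpoints_iterator(
      checkpoint_dir, min_interval_secs=60, timeout=600,
      timeout_fn=lambda: True):  # returning True ends the wait after a timeout
    logging.info("New checkpoint ready for evaluation: %s", ckpt_path)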
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer"):
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
name: Name of the operation.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
restore_op = io_ops.restore_v2(
ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
| ppwwyyxx/tensorflow | tensorflow/python/training/checkpoint_utils.py | Python | apache-2.0 | 19,434 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a VM with the provided name, metadata, and auth scopes."""
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
def GlobalComputeUrl(project, collection, name):
return ''.join([COMPUTE_URL_BASE, 'projects/', project,
'/global/', collection, '/', name])
def ZonalComputeUrl(project, zone, collection, name):
return ''.join([COMPUTE_URL_BASE, 'projects/', project,
'/zones/', zone, '/', collection, '/', name])
def GenerateConfig(context):
"""Generate configuration."""
base_name = context.properties['instanceName']
items = []
for key, value in context.properties['metadata'].iteritems():
items.append({
'key': key,
'value': value
})
metadata = {'items': items}
# Properties for the container-based instance.
instance = {
'zone': context.properties['zone'],
'machineType': ZonalComputeUrl(
context.env['project'], context.properties['zone'], 'machineTypes',
'f1-micro'),
'metadata': metadata,
'serviceAccounts': [{
'email': 'default',
'scopes': context.properties['scopes']
}],
'disks': [{
'deviceName': 'boot',
'type': 'PERSISTENT',
'autoDelete': True,
'boot': True,
'initializeParams': {
'diskName': base_name + '-disk',
'sourceImage': GlobalComputeUrl(
'debian-cloud', 'images',
''.join(['backports-debian', '-7-wheezy-v20151104']))
},
}],
'networkInterfaces': [{
'accessConfigs': [{
'name': 'external-nat',
'type': 'ONE_TO_ONE_NAT'
}],
'network': GlobalComputeUrl(
context.env['project'], 'networks', 'default')
}]
}
# Resources and output to return.
return {
'resources': [{
'name': base_name,
'type': 'compute.v1.instance',
'properties': instance
}]
}
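# --- Illustrative only: the shape of the properties this template expects, as
# they might appear in a Deployment Manager YAML (all values are placeholders):
#
# resources:
# - name: example-vm
#   type: instance.py
#   properties:
#     instanceName: example-vm
#     zone: us-central1-f
#     scopes:
#     - https://www.googleapis.com/auth/cloud-platform
#     metadata:
#       startup-script: echo hello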
| aljim/deploymentmanager-samples | examples/v2/waiter/instance.py | Python | apache-2.0 | 2,634 |
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from getting_started import main
def test_main(cloud_config, capsys):
main(cloud_config.project)
out, _ = capsys.readouterr()
assert re.search(re.compile(
r'Query Results:.hamlet', re.DOTALL), out)
| clarko1/Cramd | bigquery/api/getting_started_test.py | Python | apache-2.0 | 808 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Example code to perform int8 GEMM"
import logging
import sys
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.cuda.tensor_intrin import dp4a
DO_TUNING = True
PRETUNED_INDEX = 75333
intrin_dp4a = dp4a("local", "local", "local")
@autotvm.template
def gemm_int8(n, m, l):
A = te.placeholder((n, l), name="A", dtype="int8")
B = te.placeholder((m, l), name="B", dtype="int8")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m),
lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
name="C",
)
cfg = autotvm.get_config()
s = te.create_schedule(C.op)
y, x = C.op.axis
AA = s.cache_read(A, "shared", [C])
BB = s.cache_read(B, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
k = CC.op.reduce_axis[0]
cfg.define_split(
"tile_k",
cfg.axis(k),
num_outputs=3,
filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],
)
ko, kt, ki = cfg["tile_k"].apply(s, CC, k)
s[CC].tensorize(ki, intrin_dp4a)
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
def block_size_filter(entity):
return (
entity.size[0] * 2 >= entity.size[1] * 2
and entity.size[1] <= 16
and entity.size[3] <= 4
)
cfg.define_split("tile_y", cfg.axis(y), num_outputs=4, filter=block_size_filter)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4, filter=block_size_filter)
by, tyz, ty, yi = cfg["tile_y"].apply(s, C, y)
bx, txz, tx, xi = cfg["tile_x"].apply(s, C, x)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].bind(tyz, te.thread_axis("vthread"))
s[C].bind(txz, te.thread_axis("vthread"))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
s[CC].reorder(ko, kt, yo, xo, ki)
s[CC].unroll(kt)
for stage in [AL, BL]:
s[stage].compute_at(s[CC], kt)
_, xi = s[stage].split(stage.op.axis[1], factor=4)
s[stage].vectorize(xi)
s[stage].double_buffer()
cfg.define_knob("storage_align", [16, 48])
for stage in [AA, BB]:
s[stage].storage_align(s[stage].op.axis[0], cfg["storage_align"].val, 0)
s[stage].compute_at(s[CC], ko)
fused = s[stage].fuse(*s[stage].op.axis)
ty, tx = s[stage].split(fused, nparts=cfg["tile_y"].size[2])
tx, xi = s[stage].split(tx, nparts=cfg["tile_x"].size[2])
_, xi = s[stage].split(xi, factor=16)
s[stage].bind(ty, thread_y)
s[stage].bind(tx, thread_x)
s[stage].vectorize(xi)
cfg.define_knob("auto_unroll_max_step", [512, 1500])
s[C].pragma(by, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[C].pragma(by, "unroll_explicit", False)
cfg.add_flop(n * m * l * 2)
return s, [A, B, C]
if __name__ == "__main__":
N = 2048
n = m = l = N
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
task = autotvm.task.create(gemm_int8, args=(n, m, l), target="cuda")
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
)
log_name = "gemm_int8.log"
if DO_TUNING:
tuner = autotvm.tuner.XGBTuner(task)
tuner.tune(
n_trial=1000,
measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file(log_name)],
)
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
print("\nBest config:")
print(best_config)
else:
config = task.config_space.get(PRETUNED_INDEX)
dispatch_context = autotvm.task.ApplyConfig(config)
print("Using pretuned config:")
print(config)
with dispatch_context:
with tvm.target.Target("cuda"):
s, arg_bufs = gemm_int8(n, m, l)
f = tvm.build(s, arg_bufs, "cuda", name="gemm_int8")
dev = tvm.device("cuda", 0)
a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype="int8")
b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype="int8")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)
f(a, b, c)
tvm.testing.assert_allclose(
c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
)
num_ops = 2 * l * m * n
num_runs = 1000
timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
t = timer_f(a, b, c).mean
GOPS = num_ops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GOPS." % (num_runs, t * 1e3, GOPS))
| dmlc/tvm | apps/topi_recipe/gemm/gemm_int8.py | Python | apache-2.0 | 5,879 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import urllib
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class SnapshotsClientJSON(rest_client.RestClient):
"""Client class to send CRUD Volume API requests."""
def __init__(self, auth_provider):
super(SnapshotsClientJSON, self).__init__(auth_provider)
self.service = CONF.volume.catalog_type
self.build_interval = CONF.volume.build_interval
self.build_timeout = CONF.volume.build_timeout
def list_snapshots(self, params=None):
"""List all the snapshot."""
url = 'snapshots'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshots']
def list_snapshots_with_detail(self, params=None):
"""List the details of all snapshots."""
url = 'snapshots/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshots']
def get_snapshot(self, snapshot_id):
"""Returns the details of a single snapshot."""
url = "snapshots/%s" % str(snapshot_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshot']
def create_snapshot(self, volume_id, **kwargs):
"""
Creates a new snapshot.
volume_id(Required): id of the volume.
force: Create a snapshot even if the volume attached (Default=False)
display_name: Optional snapshot Name.
display_description: User friendly snapshot description.
"""
post_body = {'volume_id': volume_id}
post_body.update(kwargs)
post_body = json.dumps({'snapshot': post_body})
resp, body = self.post('snapshots', post_body)
body = json.loads(body)
return resp, body['snapshot']
def update_snapshot(self, snapshot_id, **kwargs):
"""Updates a snapshot."""
put_body = json.dumps({'snapshot': kwargs})
resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
body = json.loads(body)
return resp, body['snapshot']
# NOTE(afazekas): just for the wait function
def _get_snapshot_status(self, snapshot_id):
resp, body = self.get_snapshot(snapshot_id)
status = body['status']
# NOTE(afazekas): snapshot can reach an "error"
# state in a "normal" lifecycle
if (status == 'error'):
raise exceptions.SnapshotBuildErrorException(
snapshot_id=snapshot_id)
return status
    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_snapshot_status(self, snapshot_id, status):
"""Waits for a Snapshot to reach a given status."""
start_time = time.time()
old_value = value = self._get_snapshot_status(snapshot_id)
while True:
dtime = time.time() - start_time
time.sleep(self.build_interval)
if value != old_value:
                LOG.info('Value transition from "%s" to "%s" '
                         'in %d second(s).', old_value,
                         value, dtime)
if (value == status):
return value
if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds) '
'while waiting for %s, '
'but we got %s.' %
(self.build_timeout, status, value))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
old_value = value
value = self._get_snapshot_status(snapshot_id)
def delete_snapshot(self, snapshot_id):
"""Delete Snapshot."""
return self.delete("snapshots/%s" % str(snapshot_id))
def is_resource_deleted(self, id):
try:
self.get_snapshot(id)
except exceptions.NotFound:
return True
return False
def reset_snapshot_status(self, snapshot_id, status):
"""Reset the specified snapshot's status."""
post_body = json.dumps({'os-reset_status': {"status": status}})
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
return resp, body
def update_snapshot_status(self, snapshot_id, status, progress):
"""Update the specified snapshot's status."""
post_body = {
'status': status,
'progress': progress
}
post_body = json.dumps({'os-update_snapshot_status': post_body})
url = 'snapshots/%s/action' % str(snapshot_id)
resp, body = self.post(url, post_body)
return resp, body
def create_snapshot_metadata(self, snapshot_id, metadata):
"""Create metadata for the snapshot."""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.post(url, put_body)
body = json.loads(body)
return resp, body['metadata']
def get_snapshot_metadata(self, snapshot_id):
"""Get metadata of the snapshot."""
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['metadata']
def update_snapshot_metadata(self, snapshot_id, metadata):
"""Update metadata for the snapshot."""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.put(url, put_body)
body = json.loads(body)
return resp, body['metadata']
def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
"""Update metadata item for the snapshot."""
put_body = json.dumps({'meta': meta_item})
url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
resp, body = self.put(url, put_body)
body = json.loads(body)
return resp, body['meta']
def delete_snapshot_metadata_item(self, snapshot_id, id):
"""Delete metadata item for the snapshot."""
url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
resp, body = self.delete(url)
return resp, body
def force_delete_snapshot(self, snapshot_id):
"""Force Delete Snapshot."""
post_body = json.dumps({'os-force_delete': {}})
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
return resp, body
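# --- Illustrative usage (hedged sketch, not part of the original client) ---
# Assumes an already-built auth_provider and an existing volume id; names are
# placeholders and error handling is omitted.
#
# client = SnapshotsClientJSON(auth_provider)
# _, snapshot = client.create_snapshot(volume_id, display_name='example-snap')
# client.wait_for_snapshot_status(snapshot['id'], 'available')
# resp, _ = client.delete_snapshot(snapshot['id'])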
| vedujoshi/os_tempest | tempest/services/volume/json/snapshots_client.py | Python | apache-2.0 | 7,293 |
import os
from segments import Segment, theme
from utils import colors, glyphs
class CurrentDir(Segment):
bg = colors.background(theme.CURRENTDIR_BG)
fg = colors.foreground(theme.CURRENTDIR_FG)
def init(self, cwd):
home = os.path.expanduser('~')
self.text = cwd.replace(home, '~')
class ReadOnly(Segment):
bg = colors.background(theme.READONLY_BG)
fg = colors.foreground(theme.READONLY_FG)
def init(self, cwd):
self.text = ' ' + glyphs.WRITE_ONLY + ' '
if os.access(cwd, os.W_OK):
self.active = False
class Venv(Segment):
bg = colors.background(theme.VENV_BG)
fg = colors.foreground(theme.VENV_FG)
def init(self):
env = os.getenv('VIRTUAL_ENV')
if env is None:
self.active = False
return
env_name = os.path.basename(env)
self.text = glyphs.VIRTUAL_ENV + ' ' + env_name | nimiq/promptastic | segments/filesystem.py | Python | apache-2.0 | 916 |
from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
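# --- Illustrative usage (hedged sketch): saving a scene with the formats above.
# `plot_widget` stands in for any QGraphicsView/QWidget-based plot and is a
# placeholder assumption.
#
# PngFormat.write("plot.png", plot_widget)
# SvgFormat.write("plot.svg", plot_widget)
# ClipboardFormat.write("", plot_widget)  # copies a PNG to the system clipboard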
| qPCR4vir/orange3 | Orange/widgets/io.py | Python | bsd-2-clause | 4,645 |
#! /usr/bin/python
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.7+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
append the following to your __init__.py:
from _version import __version__
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
import os
import sys
import re
import subprocess
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = "git"
IN_LONG_VERSION_PY = False
GIT = "git"
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
GIT = "git"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%%s', no digits" %% ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %%d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %%s" %% ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.realpath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.realpath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.realpath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
'''
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%s', no digits" % ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %s" % ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.realpath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.realpath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.realpath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def do_vcs_install(versionfile_source, ipy):
run_command([GIT, "add", "versioneer.py"])
run_command([GIT, "add", versionfile_source])
run_command([GIT, "add", ipy])
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
run_command([GIT, "add", ".gitattributes"])
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.7+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
f = open(filename)
except EnvironmentError:
return versions
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
return versions
def write_to_version_file(filename, versions):
f = open(filename, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
print("set %s to '%s'" % (filename, versions["version"]))
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
#
# extract version from first of _version.py, 'git describe', parentdir.
# This is meant to work for developers using a source checkout, for users
# of a tarball created by 'setup.py sdist', and for users of a
# tarball/zipball created by 'git archive' or github's download-from-tag
# feature.
variables = get_expanded_variables(versionfile_source)
if variables:
ver = versions_from_expanded_variables(variables, tag_prefix)
if ver:
if verbose: print("got version from expanded variable %s" % ver)
return ver
ver = versions_from_file(versionfile)
if ver:
if verbose: print("got version from file %s %s" % (versionfile, ver))
return ver
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from git %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % ver)
return default
def get_versions(default=DEFAULT, verbose=False):
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
default=default, verbose=verbose)
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
f.close()
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "modify __init__.py and create _version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
print(" creating %s" % versionfile_source)
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
try:
old = open(ipy, "r").read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
f = open(ipy, "a")
f.write(INIT_PY_SNIPPET)
f.close()
else:
print(" %s unmodified" % ipy)
do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
return {'version': cmd_version,
'update_files': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
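# --- Illustrative usage sketch (not part of versioneer itself) ---------------
# A minimal, hypothetical setup.py showing how the module-level settings that
# get_versions() asserts on (versionfile_source, tag_prefix, parentdir_prefix,
# plus versionfile_build used by cmd_build) are typically wired together.
# Project names and paths are assumptions for illustration only.
#
#     import versioneer
#     versioneer.versionfile_source = 'myproject/_version.py'
#     versioneer.versionfile_build = 'myproject/_version.py'
#     versioneer.tag_prefix = 'v'                  # tags look like v1.2.0
#     versioneer.parentdir_prefix = 'myproject-'   # sdists unpack to myproject-1.2.0
#
#     from distutils.core import setup
#     setup(name='myproject',
#           version=versioneer.get_version(),
#           cmdclass=versioneer.get_cmdclass(),
#           packages=['myproject'])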
| sahat/bokeh | versioneer.py | Python | bsd-3-clause | 25,525 |
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic
from regressiontests.generic_views.models import Artist, Author, Book, Page
from regressiontests.generic_views.forms import AuthorForm
class CustomTemplateView(generic.TemplateView):
template_name = 'generic_views/about.html'
def get_context_data(self, **kwargs):
return {
'params': kwargs,
'key': 'value'
}
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
"""A ListView that doesn't use a model."""
queryset = [
{'first': 'John', 'last': 'Lennon'},
        {'first': 'Yoko', 'last': 'Ono'}
]
template_name = 'generic_views/list.html'
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class ArtistCreate(generic.CreateView):
model = Artist
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
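# --- Illustrative URLconf sketch (not part of the original test module) ------
# A rough idea of how a couple of the class-based views above would be wired
# into a urls.py of the same Django era; the patterns shown are assumptions
# for illustration only (the view code relies on names such as
# 'author_detail' and 'authors_list' existing somewhere).
#
#     from django.conf.urls.defaults import patterns, url
#     from regressiontests.generic_views import views
#
#     urlpatterns = patterns('',
#         url(r'^detail/author/(?P<pk>\d+)/$',
#             views.AuthorDetail.as_view(), name='author_detail'),
#         url(r'^list/authors/$',
#             views.AuthorList.as_view(), name='authors_list'),
#     )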
| faun/django_test | tests/regressiontests/generic_views/views.py | Python | bsd-3-clause | 3,421 |
"""A connection adapter that tries to use the best polling method for the
platform pika is running on.
"""
import os
import logging
import socket
import select
import errno
import time
from operator import itemgetter
from collections import defaultdict
import threading
import pika.compat
from pika.compat import dictkeys
from pika.adapters.base_connection import BaseConnection
LOGGER = logging.getLogger(__name__)
# One of select, epoll, kqueue or poll
SELECT_TYPE = None
# Use epoll's constants to keep life easy
READ = 0x0001
WRITE = 0x0004
ERROR = 0x0008
if pika.compat.PY2:
_SELECT_ERROR = select.error
else:
# select.error was deprecated and replaced by OSError in python 3.3
_SELECT_ERROR = OSError
def _get_select_errno(error):
if pika.compat.PY2:
assert isinstance(error, select.error), repr(error)
return error.args[0]
else:
assert isinstance(error, OSError), repr(error)
return error.errno
class SelectConnection(BaseConnection):
"""An asynchronous connection adapter that attempts to use the fastest
event loop adapter for the given platform.
"""
def __init__(self,
parameters=None,
on_open_callback=None,
on_open_error_callback=None,
on_close_callback=None,
stop_ioloop_on_close=True,
custom_ioloop=None):
"""Create a new instance of the Connection object.
:param pika.connection.Parameters parameters: Connection parameters
:param method on_open_callback: Method to call on connection open
        :param on_open_error_callback: Method to call if the connection can't
be opened
:type on_open_error_callback: method
:param method on_close_callback: Method to call on connection close
:param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
:param custom_ioloop: Override using the global IOLoop in Tornado
:raises: RuntimeError
"""
ioloop = custom_ioloop or IOLoop()
super(SelectConnection, self).__init__(parameters, on_open_callback,
on_open_error_callback,
on_close_callback, ioloop,
stop_ioloop_on_close)
def _adapter_connect(self):
"""Connect to the RabbitMQ broker, returning True on success, False
on failure.
:rtype: bool
"""
error = super(SelectConnection, self)._adapter_connect()
if not error:
self.ioloop.add_handler(self.socket.fileno(), self._handle_events,
self.event_state)
return error
def _adapter_disconnect(self):
"""Disconnect from the RabbitMQ broker"""
if self.socket:
self.ioloop.remove_handler(self.socket.fileno())
super(SelectConnection, self)._adapter_disconnect()
class IOLoop(object):
"""Singlton wrapper that decides which type of poller to use, creates an
instance of it in start_poller and keeps the invoking application in a
blocking state by calling the pollers start method. Poller should keep
looping until IOLoop.instance().stop() is called or there is a socket
error.
Passes through all operations to the loaded poller object.
"""
def __init__(self):
self._poller = self._get_poller()
def __getattr__(self, attr):
return getattr(self._poller, attr)
def _get_poller(self):
"""Determine the best poller to use for this enviroment."""
poller = None
if hasattr(select, 'epoll'):
if not SELECT_TYPE or SELECT_TYPE == 'epoll':
LOGGER.debug('Using EPollPoller')
poller = EPollPoller()
if not poller and hasattr(select, 'kqueue'):
if not SELECT_TYPE or SELECT_TYPE == 'kqueue':
LOGGER.debug('Using KQueuePoller')
poller = KQueuePoller()
if (not poller and hasattr(select, 'poll') and
hasattr(select.poll(), 'modify')): # pylint: disable=E1101
if not SELECT_TYPE or SELECT_TYPE == 'poll':
LOGGER.debug('Using PollPoller')
poller = PollPoller()
if not poller:
LOGGER.debug('Using SelectPoller')
poller = SelectPoller()
return poller
class SelectPoller(object):
"""Default behavior is to use Select since it's the widest supported and has
all of the methods we need for child classes as well. One should only need
to override the update_handler and start methods for additional types.
"""
    # Drop out of the poll loop every POLL_TIMEOUT secs as a worst case; this
    # is only a backstop value. We will run timeouts when they are scheduled.
POLL_TIMEOUT = 5
# if the poller uses MS specify 1000
POLL_TIMEOUT_MULT = 1
def __init__(self):
"""Create an instance of the SelectPoller
"""
# fd-to-handler function mappings
self._fd_handlers = dict()
# event-to-fdset mappings
self._fd_events = {READ: set(), WRITE: set(), ERROR: set()}
self._stopping = False
self._timeouts = {}
self._next_timeout = None
self._processing_fd_event_map = {}
# Mutex for controlling critical sections where ioloop-interrupt sockets
# are created, used, and destroyed. Needed in case `stop()` is called
# from a thread.
self._mutex = threading.Lock()
# ioloop-interrupt socket pair; initialized in start()
self._r_interrupt = None
self._w_interrupt = None
def get_interrupt_pair(self):
""" Use a socketpair to be able to interrupt the ioloop if called
        from another thread. socket.socketpair() is not supported on some
        platforms (e.g. Windows), so a pair of simple UDP sockets is used
        instead. The sockets will be
closed and garbage collected by python when the ioloop itself is.
"""
try:
read_sock, write_sock = socket.socketpair()
except AttributeError:
LOGGER.debug("Using custom socketpair for interrupt")
read_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
read_sock.bind(('localhost', 0))
write_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
write_sock.connect(read_sock.getsockname())
read_sock.setblocking(0)
write_sock.setblocking(0)
return read_sock, write_sock
def read_interrupt(self, interrupt_sock,
events, write_only): # pylint: disable=W0613
""" Read the interrupt byte(s). We ignore the event mask and write_only
        flag as we can only get here if there's data to be read on our fd.
:param int interrupt_sock: The file descriptor to read from
:param int events: (unused) The events generated for this fd
:param bool write_only: (unused) True if poll was called to trigger a
write
"""
try:
os.read(interrupt_sock, 512)
except OSError as err:
if err.errno != errno.EAGAIN:
raise
def add_timeout(self, deadline, callback_method):
"""Add the callback_method to the IOLoop timer to fire after deadline
seconds. Returns a handle to the timeout. Do not confuse with
Tornado's timeout where you pass in the time you want to have your
callback called. Only pass in the seconds until it's to be called.
:param int deadline: The number of seconds to wait to call callback
:param method callback_method: The callback method
:rtype: str
"""
timeout_at = time.time() + deadline
value = {'deadline': timeout_at, 'callback': callback_method}
timeout_id = hash(frozenset(value.items()))
self._timeouts[timeout_id] = value
if not self._next_timeout or timeout_at < self._next_timeout:
self._next_timeout = timeout_at
return timeout_id
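    # Illustrative (assumed) usage of add_timeout/remove_timeout, e.g. via a
    # connection's ioloop; my_callback is a placeholder name:
    #
    #     timeout_id = ioloop.add_timeout(5, my_callback)  # fire in ~5 seconds
    #     ioloop.remove_timeout(timeout_id)                # or cancel it again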
def remove_timeout(self, timeout_id):
"""Remove a timeout if it's still in the timeout stack
:param str timeout_id: The timeout id to remove
"""
try:
timeout = self._timeouts.pop(timeout_id)
if timeout['deadline'] == self._next_timeout:
self._next_timeout = None
except KeyError:
pass
def get_next_deadline(self):
"""Get the interval to the next timeout event, or a default interval
"""
if self._next_timeout:
timeout = max((self._next_timeout - time.time(), 0))
elif self._timeouts:
deadlines = [t['deadline'] for t in self._timeouts.values()]
self._next_timeout = min(deadlines)
timeout = max((self._next_timeout - time.time(), 0))
else:
timeout = SelectPoller.POLL_TIMEOUT
timeout = min((timeout, SelectPoller.POLL_TIMEOUT))
return timeout * SelectPoller.POLL_TIMEOUT_MULT
def process_timeouts(self):
"""Process the self._timeouts event stack"""
now = time.time()
to_run = [timer for timer in self._timeouts.values()
if timer['deadline'] <= now]
# Run the timeouts in order of deadlines. Although this shouldn't
# be strictly necessary it preserves old behaviour when timeouts
# were only run periodically.
for t in sorted(to_run, key=itemgetter('deadline')):
t['callback']()
del self._timeouts[hash(frozenset(t.items()))]
self._next_timeout = None
def add_handler(self, fileno, handler, events):
"""Add a new fileno to the set to be monitored
:param int fileno: The file descriptor
:param method handler: What is called when an event happens
:param int events: The event mask
"""
self._fd_handlers[fileno] = handler
self.update_handler(fileno, events)
def update_handler(self, fileno, events):
"""Set the events to the current events
:param int fileno: The file descriptor
:param int events: The event mask
"""
for ev in (READ, WRITE, ERROR):
if events & ev:
self._fd_events[ev].add(fileno)
else:
self._fd_events[ev].discard(fileno)
def remove_handler(self, fileno):
"""Remove a file descriptor from the set
:param int fileno: The file descriptor
"""
try:
del self._processing_fd_event_map[fileno]
except KeyError:
pass
self.update_handler(fileno, 0)
del self._fd_handlers[fileno]
def start(self):
"""Start the main poller loop. It will loop here until self._stopping"""
LOGGER.debug('Starting IOLoop')
self._stopping = False
with self._mutex:
# Watch out for reentry
if self._r_interrupt is None:
# Create ioloop-interrupt socket pair and register read handler.
# NOTE: we defer their creation because some users (e.g.,
# BlockingConnection adapter) don't use the event loop and these
# sockets would get reported as leaks
self._r_interrupt, self._w_interrupt = self.get_interrupt_pair()
self.add_handler(self._r_interrupt.fileno(),
self.read_interrupt,
READ)
interrupt_sockets_created = True
else:
interrupt_sockets_created = False
try:
# Run event loop
while not self._stopping:
self.poll()
self.process_timeouts()
finally:
# Unregister and close ioloop-interrupt socket pair
if interrupt_sockets_created:
with self._mutex:
self.remove_handler(self._r_interrupt.fileno())
self._r_interrupt.close()
self._r_interrupt = None
self._w_interrupt.close()
self._w_interrupt = None
def stop(self):
"""Request exit from the ioloop."""
LOGGER.debug('Stopping IOLoop')
self._stopping = True
with self._mutex:
if self._w_interrupt is None:
return
try:
# Send byte to interrupt the poll loop, use write() for
                # consistency.
os.write(self._w_interrupt.fileno(), b'X')
except OSError as err:
if err.errno != errno.EWOULDBLOCK:
raise
except Exception as err:
# There's nothing sensible to do here, we'll exit the interrupt
# loop after POLL_TIMEOUT secs in worst case anyway.
LOGGER.warning("Failed to send ioloop interrupt: %s", err)
raise
def poll(self, write_only=False):
"""Wait for events on interested filedescriptors.
:param bool write_only: Passed through to the hadnlers to indicate
that they should only process write events.
"""
while True:
try:
read, write, error = select.select(self._fd_events[READ],
self._fd_events[WRITE],
self._fd_events[ERROR],
self.get_next_deadline())
break
except _SELECT_ERROR as error:
if _get_select_errno(error) == errno.EINTR:
continue
else:
raise
        # Build an event bit mask for each fileno we've received an event for
fd_event_map = defaultdict(int)
for fd_set, ev in zip((read, write, error), (READ, WRITE, ERROR)):
for fileno in fd_set:
fd_event_map[fileno] |= ev
self._process_fd_events(fd_event_map, write_only)
def _process_fd_events(self, fd_event_map, write_only):
""" Processes the callbacks for each fileno we've recieved events.
Before doing so we re-calculate the event mask based on what is
currently set in case it has been changed under our feet by a
previous callback. We also take a store a refernce to the
fd_event_map in the class so that we can detect removal of an
fileno during processing of another callback and not generate
spurious callbacks on it.
:param dict fd_event_map: Map of fds to events recieved on them.
"""
self._processing_fd_event_map = fd_event_map
for fileno in dictkeys(fd_event_map):
if fileno not in fd_event_map:
# the fileno has been removed from the map under our feet.
continue
events = fd_event_map[fileno]
for ev in [READ, WRITE, ERROR]:
if fileno not in self._fd_events[ev]:
events &= ~ev
if events:
handler = self._fd_handlers[fileno]
handler(fileno, events, write_only=write_only)
class KQueuePoller(SelectPoller):
"""KQueuePoller works on BSD based systems and is faster than select"""
def __init__(self):
"""Create an instance of the KQueuePoller
:param int fileno: The file descriptor to check events for
:param method handler: What is called when an event happens
:param int events: The events to look for
"""
self._kqueue = select.kqueue()
super(KQueuePoller, self).__init__()
def update_handler(self, fileno, events):
"""Set the events to the current events
:param int fileno: The file descriptor
:param int events: The event mask
"""
kevents = list()
if not events & READ:
if fileno in self._fd_events[READ]:
kevents.append(select.kevent(fileno,
filter=select.KQ_FILTER_READ,
flags=select.KQ_EV_DELETE))
else:
if fileno not in self._fd_events[READ]:
kevents.append(select.kevent(fileno,
filter=select.KQ_FILTER_READ,
flags=select.KQ_EV_ADD))
if not events & WRITE:
if fileno in self._fd_events[WRITE]:
kevents.append(select.kevent(fileno,
filter=select.KQ_FILTER_WRITE,
flags=select.KQ_EV_DELETE))
else:
if fileno not in self._fd_events[WRITE]:
kevents.append(select.kevent(fileno,
filter=select.KQ_FILTER_WRITE,
flags=select.KQ_EV_ADD))
for event in kevents:
self._kqueue.control([event], 0)
super(KQueuePoller, self).update_handler(fileno, events)
def _map_event(self, kevent):
"""return the event type associated with a kevent object
:param kevent kevent: a kevent object as returned by kqueue.control()
"""
if kevent.filter == select.KQ_FILTER_READ:
return READ
elif kevent.filter == select.KQ_FILTER_WRITE:
return WRITE
elif kevent.flags & select.KQ_EV_ERROR:
return ERROR
def poll(self, write_only=False):
"""Check to see if the events that are cared about have fired.
:param bool write_only: Don't look at self.events, just look to see if
the adapter can write.
"""
while True:
try:
kevents = self._kqueue.control(None, 1000,
self.get_next_deadline())
break
except _SELECT_ERROR as error:
if _get_select_errno(error) == errno.EINTR:
continue
else:
raise
fd_event_map = defaultdict(int)
for event in kevents:
fileno = event.ident
fd_event_map[fileno] |= self._map_event(event)
self._process_fd_events(fd_event_map, write_only)
class PollPoller(SelectPoller):
"""Poll works on Linux and can have better performance than EPoll in
certain scenarios. Both are faster than select.
"""
POLL_TIMEOUT_MULT = 1000
def __init__(self):
"""Create an instance of the KQueuePoller
:param int fileno: The file descriptor to check events for
:param method handler: What is called when an event happens
:param int events: The events to look for
"""
self._poll = self.create_poller()
super(PollPoller, self).__init__()
def create_poller(self):
return select.poll() # pylint: disable=E1101
def add_handler(self, fileno, handler, events):
"""Add a file descriptor to the poll set
:param int fileno: The file descriptor to check events for
:param method handler: What is called when an event happens
:param int events: The events to look for
"""
self._poll.register(fileno, events)
super(PollPoller, self).add_handler(fileno, handler, events)
def update_handler(self, fileno, events):
"""Set the events to the current events
:param int fileno: The file descriptor
:param int events: The event mask
"""
super(PollPoller, self).update_handler(fileno, events)
self._poll.modify(fileno, events)
def remove_handler(self, fileno):
"""Remove a fileno to the set
:param int fileno: The file descriptor
"""
super(PollPoller, self).remove_handler(fileno)
self._poll.unregister(fileno)
def poll(self, write_only=False):
"""Poll until the next timeout waiting for an event
:param bool write_only: Only process write events
"""
while True:
try:
events = self._poll.poll(self.get_next_deadline())
break
except _SELECT_ERROR as error:
if _get_select_errno(error) == errno.EINTR:
continue
else:
raise
fd_event_map = defaultdict(int)
for fileno, event in events:
fd_event_map[fileno] |= event
self._process_fd_events(fd_event_map, write_only)
class EPollPoller(PollPoller):
"""EPoll works on Linux and can have better performance than Poll in
certain scenarios. Both are faster than select.
"""
POLL_TIMEOUT_MULT = 1
def create_poller(self):
return select.epoll() # pylint: disable=E1101
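# --- Illustrative usage sketch (not part of this module) ---------------------
# A minimal, hypothetical way to drive a SelectConnection; callback names are
# placeholders and error handling is omitted.
#
#     def on_open(connection):
#         connection.close()          # do real work here instead
#
#     connection = SelectConnection(on_open_callback=on_open)
#     try:
#         connection.ioloop.start()   # blocks inside the chosen poller's loop
#     except KeyboardInterrupt:
#         connection.close()
#         connection.ioloop.start()   # let the close handshake finish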
| reddec/pika | pika/adapters/select_connection.py | Python | bsd-3-clause | 21,137 |
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
    # On Python 2, io.StringIO only accepts unicode; use the byte-string
    # friendly StringIO module instead.
    from StringIO import StringIO
import compiler
import inspect
import textwrap
import tokenize
from .compiler_unparse import unparse
class Comment(object):
""" A comment block.
"""
is_comment = True
def __init__(self, start_lineno, end_lineno, text):
# int : The first line number in the block. 1-indexed.
self.start_lineno = start_lineno
# int : The last line number. Inclusive!
self.end_lineno = end_lineno
# str : The text block including '#' character but not any leading spaces.
self.text = text
def add(self, string, start, end, line):
""" Add a new comment line.
"""
self.start_lineno = min(self.start_lineno, start[0])
self.end_lineno = max(self.end_lineno, end[0])
self.text += string
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
self.end_lineno, self.text)
class NonComment(object):
""" A non-comment block of code.
"""
is_comment = False
def __init__(self, start_lineno, end_lineno):
self.start_lineno = start_lineno
self.end_lineno = end_lineno
def add(self, string, start, end, line):
""" Add lines to the block.
"""
if string.strip():
# Only add if not entirely whitespace.
self.start_lineno = min(self.start_lineno, start[0])
self.end_lineno = max(self.end_lineno, end[0])
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
self.end_lineno)
class CommentBlocker(object):
""" Pull out contiguous comment blocks.
"""
def __init__(self):
# Start with a dummy.
self.current_block = NonComment(0, 0)
# All of the blocks seen so far.
self.blocks = []
# The index mapping lines of code to their associated comment blocks.
self.index = {}
def process_file(self, file):
""" Process a file object.
"""
if sys.version_info[0] >= 3:
nxt = file.__next__
else:
nxt = file.next
for token in tokenize.generate_tokens(nxt):
self.process_token(*token)
self.make_index()
def process_token(self, kind, string, start, end, line):
""" Process a single token.
"""
if self.current_block.is_comment:
if kind == tokenize.COMMENT:
self.current_block.add(string, start, end, line)
else:
self.new_noncomment(start[0], end[0])
else:
if kind == tokenize.COMMENT:
self.new_comment(string, start, end, line)
else:
self.current_block.add(string, start, end, line)
def new_noncomment(self, start_lineno, end_lineno):
""" We are transitioning from a noncomment to a comment.
"""
block = NonComment(start_lineno, end_lineno)
self.blocks.append(block)
self.current_block = block
def new_comment(self, string, start, end, line):
""" Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
"""
prefix = line[:start[1]]
if prefix.strip():
# Oops! Trailing comment, not a comment block.
self.current_block.add(string, start, end, line)
else:
# A comment block.
block = Comment(start[0], end[0], string)
self.blocks.append(block)
self.current_block = block
def make_index(self):
""" Make the index mapping lines of actual code to their associated
prefix comments.
"""
for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
if not block.is_comment:
self.index[block.start_lineno] = prev
def search_for_comment(self, lineno, default=None):
""" Find the comment block just before the given line number.
Returns None (or the specified default) if there is no such block.
"""
if not self.index:
self.make_index()
block = self.index.get(lineno, None)
text = getattr(block, 'text', default)
return text
def strip_comment_marker(text):
""" Strip # markers at the front of a block of comment text.
"""
lines = []
for line in text.splitlines():
lines.append(line.lstrip('#'))
text = textwrap.dedent('\n'.join(lines))
return text
def get_class_traits(klass):
""" Yield all of the documentation for trait definitions on a class object.
"""
# FIXME: gracefully handle errors here or in the caller?
source = inspect.getsource(klass)
cb = CommentBlocker()
cb.process_file(StringIO(source))
mod_ast = compiler.parse(source)
class_ast = mod_ast.node.nodes[0]
for node in class_ast.code.nodes:
# FIXME: handle other kinds of assignments?
if isinstance(node, compiler.ast.Assign):
name = node.nodes[0].name
rhs = unparse(node.expr).strip()
doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
yield name, rhs, doc
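# --- Illustrative sketch (not part of the original module) -------------------
# Roughly how the machinery above is meant to be used: get_class_traits()
# yields (name, rhs, doc) triples for class-level assignments that carry a
# comment block above them. The class below is a hypothetical example for
# illustration only; exact output is approximate.
#
#     class Example(object):
#         # The answer to everything.
#         answer = 42
#
#     for name, rhs, doc in get_class_traits(Example):
#         print(name, rhs, doc)    # roughly: answer 42 The answer to everything.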
| nguy/artview | docs/sphinxext/numpydoc/comment_eater.py | Python | bsd-3-clause | 5,425 |
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
'popcorn_gallery.users.views',
url(r'^edit/$', 'edit', name='users_edit'),
url(r'^delete/$', 'delete_profile', name='users_delete'),
url(r'^(?P<username>[\w-]+)/$', 'profile', name='users_profile'),
)
| mozilla/popcorn_maker | popcorn_gallery/users/urls.py | Python | bsd-3-clause | 294 |
import copy
from django import forms
from django.db import models
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.db.models.fields.subclassing import Creator
from djangae.forms.fields import ListFormField
from django.utils.text import capfirst
class _FakeModel(object):
"""
An object of this class can pass itself off as a model instance
when used as an arguments to Field.pre_save method (item_fields
of iterable fields are not actually fields of any model).
"""
def __init__(self, field, value):
setattr(self, field.attname, value)
class IterableField(models.Field):
__metaclass__ = models.SubfieldBase
@property
def _iterable_type(self): raise NotImplementedError()
def db_type(self, connection):
return 'list'
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if value is None:
raise ValueError("You can't query an iterable field with None")
if lookup_type == 'isnull' and value in (True, False):
return value
if lookup_type != 'exact' and lookup_type != 'in':
raise ValueError("You can only query using exact and in lookups on iterable fields")
if isinstance(value, (list, set)):
return [ self.item_field_type.to_python(x) for x in value ]
return self.item_field_type.to_python(value)
def get_prep_value(self, value):
if value is None:
raise ValueError("You can't set a {} to None (did you mean {}?)".format(
self.__class__.__name__, str(self._iterable_type())
))
if isinstance(value, basestring):
# Catch accidentally assigning a string to a ListField
raise ValueError("Tried to assign a string to a {}".format(self.__class__.__name__))
return super(IterableField, self).get_prep_value(value)
def __init__(self, item_field_type, *args, **kwargs):
        # This seems bonkers: we shout at people for specifying null=True, but then do it ourselves. That's because
        # *we* abuse None values for our own purposes (to represent an empty iterable); if someone else tries to,
        # all hell breaks loose
if kwargs.get("null", False):
raise RuntimeError("IterableFields cannot be set as nullable (as the datastore doesn't differentiate None vs []")
kwargs["null"] = True
default = kwargs.get("default", [])
self._original_item_field_type = copy.deepcopy(item_field_type) # For deconstruction purposes
if default is not None and not callable(default):
kwargs["default"] = lambda: self._iterable_type(default)
if hasattr(item_field_type, 'attname'):
item_field_type = item_field_type.__class__
if callable(item_field_type):
item_field_type = item_field_type()
if isinstance(item_field_type, models.ForeignKey):
raise ImproperlyConfigured("Lists of ForeignKeys aren't supported, use RelatedSetField instead")
self.item_field_type = item_field_type
# We'll be pretending that item_field is a field of a model
# with just one "value" field.
assert not hasattr(self.item_field_type, 'attname')
self.item_field_type.set_attributes_from_name('value')
super(IterableField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(IterableField, self).deconstruct()
args = (self._original_item_field_type,)
del kwargs["null"]
return name, path, args, kwargs
def contribute_to_class(self, cls, name):
self.item_field_type.model = cls
self.item_field_type.name = name
super(IterableField, self).contribute_to_class(cls, name)
# If items' field uses SubfieldBase we also need to.
item_metaclass = getattr(self.item_field_type, '__metaclass__', None)
if item_metaclass and issubclass(item_metaclass, models.SubfieldBase):
setattr(cls, self.name, Creator(self))
def _map(self, function, iterable, *args, **kwargs):
return self._iterable_type(function(element, *args, **kwargs) for element in iterable)
def to_python(self, value):
if value is None:
return self._iterable_type([])
# Because a set cannot be defined in JSON, we must allow a list to be passed as the value
# of a SetField, as otherwise SetField data can't be loaded from fixtures
if not hasattr(value, "__iter__"): # Allows list/set, not string
raise ValueError("Tried to assign a {} to a {}".format(value.__class__.__name__, self.__class__.__name__))
return self._map(self.item_field_type.to_python, value)
def pre_save(self, model_instance, add):
"""
Gets our value from the model_instance and passes its items
through item_field's pre_save (using a fake model instance).
"""
value = getattr(model_instance, self.attname)
if value is None:
return None
return self._map(lambda item: self.item_field_type.pre_save(_FakeModel(self.item_field_type, item), add), value)
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
if value is None:
return None
# If the value is an empty iterable, store None
if value == self._iterable_type([]):
return None
return self._map(self.item_field_type.get_db_prep_save, value,
connection=connection)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Passes the value through get_db_prep_lookup of item_field.
"""
return self.item_field_type.get_db_prep_lookup(
lookup_type, value, connection=connection, prepared=prepared)
def validate(self, value_list, model_instance):
""" We want to override the default validate method from django.db.fields.Field, because it
is only designed to deal with a single choice from the user.
"""
if not self.editable:
# Skip validation for non-editable fields
return
# Validate choices
if self.choices:
valid_values = []
for choice in self.choices:
if isinstance(choice[0], (list, tuple)):
# this is an optgroup, so look inside it for the options
for optgroup_choice in choice[0]:
valid_values.append(optgroup_choice[0])
else:
valid_values.append(choice[0])
for value in value_list:
if value not in valid_values:
# TODO: if there is more than 1 invalid value then this should show all of the invalid values
raise ValidationError(self.error_messages['invalid_choice'] % value)
# Validate null-ness
if value_list is None and not self.null:
raise ValidationError(self.error_messages['null'])
if not self.blank and not value_list:
raise ValidationError(self.error_messages['blank'])
# apply the default items validation rules
for value in value_list:
self.item_field_type.clean(value, model_instance)
def formfield(self, **kwargs):
""" If this field has choices, then we can use a multiple choice field.
NB: The choices must be set on *this* field, e.g. this_field = ListField(CharField(), choices=x)
as opposed to: this_field = ListField(CharField(choices=x))
"""
#Largely lifted straight from Field.formfield() in django.models.__init__.py
defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
if self.has_default(): #No idea what this does
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
form_field_class = forms.MultipleChoiceField
defaults['choices'] = self.get_choices(include_blank=False) #no empty value on a multi-select
else:
form_field_class = ListFormField
defaults.update(**kwargs)
return form_field_class(**defaults)
class ListField(IterableField):
def __init__(self, *args, **kwargs):
self.ordering = kwargs.pop('ordering', None)
if self.ordering is not None and not callable(self.ordering):
raise TypeError("'ordering' has to be a callable or None, "
"not of type %r." % type(self.ordering))
super(ListField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
value = super(ListField, self).pre_save(model_instance, add)
if value and self.ordering:
value.sort(key=self.ordering)
return value
@property
def _iterable_type(self):
return list
def deconstruct(self):
name, path, args, kwargs = super(ListField, self).deconstruct()
kwargs['ordering'] = self.ordering
return name, path, args, kwargs
class SetField(IterableField):
@property
def _iterable_type(self):
return set
def db_type(self, connection):
return 'set'
def get_db_prep_save(self, *args, **kwargs):
ret = super(SetField, self).get_db_prep_save(*args, **kwargs)
if ret:
ret = list(ret)
return ret
def get_db_prep_lookup(self, *args, **kwargs):
ret = super(SetField, self).get_db_prep_lookup(*args, **kwargs)
if ret:
ret = list(ret)
return ret
def value_to_string(self, obj):
"""
Custom method for serialization, as JSON doesn't support
serializing sets.
"""
return str(list(self._get_val_from_obj(obj)))
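# --- Illustrative usage sketch (not part of this module) ---------------------
# A minimal, hypothetical model showing how ListField and SetField are
# declared; the model and field names are assumptions for illustration only.
#
#     from django.db import models
#
#     class Post(models.Model):
#         tags = SetField(models.CharField(max_length=50), blank=True)
#         scores = ListField(models.IntegerField(), ordering=lambda i: i)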
| nealedj/djangae | djangae/fields/iterable.py | Python | bsd-3-clause | 10,309 |
from __future__ import absolute_import
from datetime import datetime
from django.utils import timezone
from django.core.urlresolvers import reverse
from sentry.models import (
ProcessingIssue, EventError, RawEvent, EventProcessingIssue
)
from sentry.testutils import APITestCase
class ProjectProcessingIssuesTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(team=team, name='foo')
raw_event = RawEvent.objects.create(
project_id=project1.id,
event_id='abc'
)
issue, _ = ProcessingIssue.objects.get_or_create(
project_id=project1.id,
checksum='abc',
type=EventError.NATIVE_MISSING_DSYM
)
EventProcessingIssue.objects.get_or_create(
raw_event=raw_event,
processing_issue=issue,
)
url = reverse('sentry-api-0-project-processing-issues', kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
})
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data['hasIssues'] is True
assert response.data['hasMoreResolveableIssues'] is False
assert response.data['numIssues'] == 1
assert response.data['issuesProcessing'] == 0
assert response.data['resolveableIssues'] == 0
def test_issues(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(team=team, name='foo')
raw_event = RawEvent.objects.create(
project_id=project1.id,
event_id='abc'
)
issue, _ = ProcessingIssue.objects.get_or_create(
project_id=project1.id,
checksum='abc',
type=EventError.NATIVE_MISSING_DSYM,
datetime=datetime(2013, 8, 13, 3, 8, 25, tzinfo=timezone.utc),
)
issue2, _ = ProcessingIssue.objects.get_or_create(
project_id=project1.id,
checksum='abcd',
type=EventError.NATIVE_MISSING_DSYM,
datetime=datetime(2014, 8, 13, 3, 8, 25, tzinfo=timezone.utc),
)
EventProcessingIssue.objects.get_or_create(
raw_event=raw_event,
processing_issue=issue,
)
url = reverse('sentry-api-0-project-processing-issues', kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
})
response = self.client.get(url + '?detailed=1', format='json')
assert response.status_code == 200, response.content
assert len(response.data['issues']) == 2
assert response.data['numIssues'] == 2
assert response.data['lastSeen'] == issue2.datetime
assert response.data['hasIssues'] is True
assert response.data['hasMoreResolveableIssues'] is False
assert response.data['issuesProcessing'] == 0
assert response.data['resolveableIssues'] == 0
assert response.data['issues'][0]['checksum'] == issue.checksum
assert response.data['issues'][0]['numEvents'] == 1
assert response.data['issues'][0]['type'] == EventError.NATIVE_MISSING_DSYM
assert response.data['issues'][1]['checksum'] == issue2.checksum
def test_resolvable_issues(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(team=team, name='foo')
RawEvent.objects.create(
project_id=project1.id,
event_id='abc'
)
url = reverse('sentry-api-0-project-processing-issues', kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
})
response = self.client.get(url + '?detailed=1', format='json')
assert response.status_code == 200, response.content
assert response.data['numIssues'] == 0
assert response.data['resolveableIssues'] == 1
assert response.data['lastSeen'] is None
assert response.data['hasIssues'] is False
assert response.data['hasMoreResolveableIssues'] is False
assert response.data['numIssues'] == 0
assert response.data['issuesProcessing'] == 0
| BuildingLink/sentry | tests/sentry/api/endpoints/test_project_processingissues.py | Python | bsd-3-clause | 4,405 |
"""Tools for manipulating of large commutative expressions. """
from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.compatibility import iterable, is_sequence, SYMPY_INTS
from sympy.core.mul import Mul, _keep_coeff
from sympy.core.power import Pow
from sympy.core.basic import Basic, preorder_traversal
from sympy.core.expr import Expr
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, Integer, Number, I
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.coreerrors import NonCommutativeExpression
from sympy.core.containers import Tuple, Dict
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import (common_prefix, common_suffix,
variations, ordered)
from collections import defaultdict
def _isnumber(i):
return isinstance(i, (SYMPY_INTS, float)) or i.is_Number
def decompose_power(expr):
"""
Decompose power into symbolic base and integer exponent.
This is strictly only valid if the exponent from which
the integer is extracted is itself an integer or the
base is positive. These conditions are assumed and not
checked here.
Examples
========
>>> from sympy.core.exprtools import decompose_power
>>> from sympy.abc import x, y
>>> decompose_power(x)
(x, 1)
>>> decompose_power(x**2)
(x, 2)
>>> decompose_power(x**(2*y))
(x**y, 2)
>>> decompose_power(x**(2*y/3))
(x**(y/3), 2)
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.q))
exp = exp.p
else:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, exp = Pow(base, tail), -1
elif exp is not S.One:
tail = _keep_coeff(Rational(1, exp.q), tail)
base, exp = Pow(base, tail), exp.p
else:
base, exp = expr, 1
return base, exp
class Factors(object):
"""Efficient representation of ``f_1*f_2*...*f_n``."""
__slots__ = ['factors', 'gens']
def __init__(self, factors=None): # Factors
"""Initialize Factors from dict or expr.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x
>>> from sympy import I
>>> e = 2*x**3
>>> Factors(e)
Factors({2: 1, x: 3})
>>> Factors(e.as_powers_dict())
Factors({2: 1, x: 3})
>>> f = _
>>> f.factors # underlying dictionary
{2: 1, x: 3}
>>> f.gens # base of each factor
frozenset([2, x])
>>> Factors(0)
Factors({0: 1})
>>> Factors(I)
Factors({I: 1})
Notes
=====
Although a dictionary can be passed, only minimal checking is
performed: powers of -1 and I are made canonical.
"""
if isinstance(factors, (SYMPY_INTS, float)):
factors = S(factors)
if isinstance(factors, Factors):
factors = factors.factors.copy()
elif factors is None or factors is S.One:
factors = {}
elif factors is S.Zero or factors == 0:
factors = {S.Zero: S.One}
elif isinstance(factors, Number):
n = factors
factors = {}
if n < 0:
factors[S.NegativeOne] = S.One
n = -n
if n is not S.One:
if n.is_Float or n.is_Integer or n is S.Infinity:
factors[n] = S.One
elif n.is_Rational:
# since we're processing Numbers, the denominator is
# stored with a negative exponent; all other factors
                    # are left as they are.
if n.p != 1:
factors[Integer(n.p)] = S.One
factors[Integer(n.q)] = S.NegativeOne
else:
raise ValueError('Expected Float|Rational|Integer, not %s' % n)
elif isinstance(factors, Basic) and not factors.args:
factors = {factors: S.One}
elif isinstance(factors, Expr):
c, nc = factors.args_cnc()
i = c.count(I)
for _ in range(i):
c.remove(I)
factors = dict(Mul._from_args(c).as_powers_dict())
if i:
factors[I] = S.One*i
if nc:
factors[Mul(*nc, evaluate=False)] = S.One
else:
factors = factors.copy() # /!\ should be dict-like
# tidy up -/+1 and I exponents if Rational
handle = []
for k in factors:
if k is I or k in (-1, 1):
handle.append(k)
if handle:
i1 = S.One
for k in handle:
if not _isnumber(factors[k]):
continue
i1 *= k**factors.pop(k)
if i1 is not S.One:
for a in i1.args if i1.is_Mul else [i1]: # at worst, -1.0*I*(-1)**e
if a is S.NegativeOne:
factors[a] = S.One
elif a is I:
factors[I] = S.One
elif a.is_Pow:
if S.NegativeOne not in factors:
factors[S.NegativeOne] = S.Zero
factors[S.NegativeOne] += a.exp
elif a == 1:
factors[a] = S.One
elif a == -1:
factors[-a] = S.One
factors[S.NegativeOne] = S.One
else:
raise ValueError('unexpected factor in i1: %s' % a)
self.factors = factors
try:
self.gens = frozenset(factors.keys())
except AttributeError:
raise TypeError('expecting Expr or dictionary')
def __hash__(self): # Factors
keys = tuple(ordered(self.factors.keys()))
values = [self.factors[k] for k in keys]
return hash((keys, values))
def __repr__(self): # Factors
return "Factors({%s})" % ', '.join(
['%s: %s' % (k, v) for k, v in ordered(self.factors.items())])
@property
def is_zero(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(0).is_zero
True
"""
f = self.factors
return len(f) == 1 and S.Zero in f
@property
def is_one(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(1).is_one
True
"""
return not self.factors
def as_expr(self): # Factors
"""Return the underlying expression.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> Factors((x*y**2).as_powers_dict()).as_expr()
x*y**2
"""
args = []
for factor, exp in self.factors.items():
if exp != 1:
b, e = factor.as_base_exp()
if isinstance(exp, int):
e = _keep_coeff(Integer(exp), e)
elif isinstance(exp, Rational):
e = _keep_coeff(exp, e)
else:
e *= exp
args.append(b**e)
else:
args.append(factor)
return Mul(*args)
def mul(self, other): # Factors
"""Return Factors of ``self * other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.mul(b)
Factors({x: 2, y: 3, z: -1})
>>> a*b
Factors({x: 2, y: 3, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def normal(self, other):
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
        The only differences between this and the ``div`` method are that this
is 1) optimized for the case when there are few factors in common and
2) this does not raise an error if ``other`` is zero.
See Also
========
div
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return (Factors(), Factors(S.Zero))
if self.is_zero:
return (Factors(S.Zero), Factors())
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.items():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
elif _isnumber(exp):
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
else:
r = self_exp.extract_additively(other_exp)
if r is not None:
if r:
self_factors[factor] = r
del other_factors[factor]
else: # should be handled already
del self_factors[factor]
del other_factors[factor]
else:
sc, sa = self_exp.as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
self_factors[factor] -= oc
other_exp = oa
elif diff < 0:
self_factors[factor] -= sc
other_factors[factor] -= sc
other_exp = oa - diff
else:
self_factors[factor] = sa
other_exp = oa
if other_exp:
other_factors[factor] = other_exp
else:
del other_factors[factor]
return Factors(self_factors), Factors(other_factors)
def div(self, other): # Factors
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
This is optimized for the case when there are many factors in common.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> a = Factors((x*y**2).as_powers_dict())
>>> a.div(a)
(Factors({}), Factors({}))
>>> a.div(x*z)
(Factors({y: 2}), Factors({z: 1}))
The ``/`` operator only gives ``quo``:
>>> a/x
Factors({y: 2})
Factors treats its factors as though they are all in the numerator, so
if you violate this assumption the results will be correct but will
not strictly correspond to the numerator and denominator of the ratio:
>>> a.div(x/z)
(Factors({y: 2}), Factors({z: -1}))
Factors is also naive about bases: it does not attempt any denesting
of Rational-base terms, for example the following does not become
2**(2*x)/2.
>>> Factors(2**(2*x + 2)).div(S(8))
(Factors({2: 2*x + 2}), Factors({8: 1}))
        factor_terms can clean up such Rational-base powers:
>>> from sympy.core.exprtools import factor_terms
>>> n, d = Factors(2**(2*x + 2)).div(S(8))
>>> n.as_expr()/d.as_expr()
2**(2*x + 2)/8
>>> factor_terms(_)
2**(2*x)/2
"""
quo, rem = dict(self.factors), {}
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
raise ZeroDivisionError
if self.is_zero:
return (Factors(S.Zero), Factors())
for factor, exp in other.factors.items():
if factor in quo:
d = quo[factor] - exp
if _isnumber(d):
if d <= 0:
del quo[factor]
if d >= 0:
if d:
quo[factor] = d
continue
exp = -d
else:
r = quo[factor].extract_additively(exp)
if r is not None:
if r:
quo[factor] = r
else: # should be handled already
del quo[factor]
else:
other_exp = exp
sc, sa = quo[factor].as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
quo[factor] -= oc
other_exp = oa
elif diff < 0:
quo[factor] -= sc
other_exp = oa - diff
else:
quo[factor] = sa
other_exp = oa
if other_exp:
rem[factor] = other_exp
else:
assert factor not in rem
continue
rem[factor] = exp
return Factors(quo), Factors(rem)
def quo(self, other): # Factors
"""Return numerator Factor of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.quo(b) # same as a/b
Factors({y: 1})
"""
return self.div(other)[0]
def rem(self, other): # Factors
"""Return denominator Factors of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.rem(b)
Factors({z: -1})
>>> a.rem(a)
Factors({})
"""
return self.div(other)[1]
def pow(self, other): # Factors
"""Return self raised to a non-negative integer power.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> a = Factors((x*y**2).as_powers_dict())
>>> a**2
Factors({x: 2, y: 4})
"""
if isinstance(other, Factors):
other = other.as_expr()
if other.is_Integer:
other = int(other)
if isinstance(other, SYMPY_INTS) and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.items():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError("expected non-negative integer, got %s" % other)
def gcd(self, other): # Factors
"""Return Factors of ``gcd(self, other)``. The keys are
the intersection of factors with the minimum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.gcd(b)
Factors({x: 1, y: 1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return Factors(self.factors)
factors = {}
for factor, exp in self.factors.items():
if factor in other.factors:
exp = min(exp, other.factors[factor])
factors[factor] = exp
return Factors(factors)
def lcm(self, other): # Factors
"""Return Factors of ``lcm(self, other)`` which are
the union of factors with the maximum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.lcm(b)
Factors({x: 1, y: 2, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other): # Factors
return self.mul(other)
def __divmod__(self, other): # Factors
return self.div(other)
def __div__(self, other): # Factors
return self.quo(other)
__truediv__ = __div__
def __mod__(self, other): # Factors
return self.rem(other)
def __pow__(self, other): # Factors
return self.pow(other)
def __eq__(self, other): # Factors
if not isinstance(other, Factors):
other = Factors(other)
return self.factors == other.factors
def __ne__(self, other): # Factors
return not self.__eq__(other)
class Term(object):
"""Efficient representation of ``coeff*(numer/denom)``. """
__slots__ = ['coeff', 'numer', 'denom']
def __init__(self, term, numer=None, denom=None): # Term
if numer is None and denom is None:
if not term.is_commutative:
raise NonCommutativeExpression(
'commutative expression expected')
coeff, factors = term.as_coeff_mul()
numer, denom = defaultdict(int), defaultdict(int)
for factor in factors:
base, exp = decompose_power(factor)
if base.is_Add:
cont, base = base.primitive()
coeff *= cont**exp
if exp > 0:
numer[base] += exp
else:
denom[base] += -exp
numer = Factors(numer)
denom = Factors(denom)
else:
coeff = term
if numer is None:
numer = Factors()
if denom is None:
denom = Factors()
self.coeff = coeff
self.numer = numer
self.denom = denom
def __hash__(self): # Term
return hash((self.coeff, self.numer, self.denom))
def __repr__(self): # Term
return "Term(%s, %s, %s)" % (self.coeff, self.numer, self.denom)
def as_expr(self): # Term
return self.coeff*(self.numer.as_expr()/self.denom.as_expr())
def mul(self, other): # Term
coeff = self.coeff*other.coeff
numer = self.numer.mul(other.numer)
denom = self.denom.mul(other.denom)
numer, denom = numer.normal(denom)
return Term(coeff, numer, denom)
def inv(self): # Term
return Term(1/self.coeff, self.denom, self.numer)
def quo(self, other): # Term
return self.mul(other.inv())
def pow(self, other): # Term
if other < 0:
return self.inv().pow(-other)
else:
return Term(self.coeff ** other,
self.numer.pow(other),
self.denom.pow(other))
def gcd(self, other): # Term
return Term(self.coeff.gcd(other.coeff),
self.numer.gcd(other.numer),
self.denom.gcd(other.denom))
def lcm(self, other): # Term
return Term(self.coeff.lcm(other.coeff),
self.numer.lcm(other.numer),
self.denom.lcm(other.denom))
def __mul__(self, other): # Term
if isinstance(other, Term):
return self.mul(other)
else:
return NotImplemented
def __div__(self, other): # Term
if isinstance(other, Term):
return self.quo(other)
else:
return NotImplemented
__truediv__ = __div__
def __pow__(self, other): # Term
if isinstance(other, SYMPY_INTS):
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other): # Term
return (self.coeff == other.coeff and
self.numer == other.numer and
self.denom == other.denom)
def __ne__(self, other): # Term
return not self.__eq__(other)
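# --- Illustrative sketch (not part of the original module) -------------------
# A rough, doctest-style illustration of how Term splits a product into a
# coefficient plus numerator/denominator Factors (assuming x and y are the
# usual symbols from sympy.abc):
#
#     >>> from sympy.abc import x, y
#     >>> t = Term(3*x**2/y)
#     >>> t.coeff, t.numer.as_expr(), t.denom.as_expr()
#     (3, x**2, y)
#     >>> (t*t.inv()).as_expr()
#     1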
def _gcd_terms(terms, isprimitive=False, fraction=True):
"""Helper function for :func:`gcd_terms`.
If ``isprimitive`` is True then the call to primitive
for an Add will be skipped. This is useful when the
    content has already been extracted.
If ``fraction`` is True then the expression will appear over a common
denominator, the lcm of all term denominators.
"""
if isinstance(terms, Basic) and not isinstance(terms, Tuple):
terms = Add.make_args(terms)
terms = list(map(Term, [t for t in terms if t]))
# there is some simplification that may happen if we leave this
# here rather than duplicate it before the mapping of Term onto
# the terms
if len(terms) == 0:
return S.Zero, S.Zero, S.One
if len(terms) == 1:
cont = terms[0].coeff
numer = terms[0].numer.as_expr()
denom = terms[0].denom.as_expr()
else:
cont = terms[0]
for term in terms[1:]:
cont = cont.gcd(term)
for i, term in enumerate(terms):
terms[i] = term.quo(cont)
if fraction:
denom = terms[0].denom
for term in terms[1:]:
denom = denom.lcm(term.denom)
numers = []
for term in terms:
numer = term.numer.mul(denom.quo(term.denom))
numers.append(term.coeff*numer.as_expr())
else:
numers = [t.as_expr() for t in terms]
denom = Term(S(1)).numer
cont = cont.as_expr()
numer = Add(*numers)
denom = denom.as_expr()
if not isprimitive and numer.is_Add:
_cont, numer = numer.primitive()
cont *= _cont
return cont, numer, denom
def gcd_terms(terms, isprimitive=False, clear=True, fraction=True):
"""Compute the GCD of ``terms`` and put them together.
``terms`` can be an expression or a non-Basic sequence of expressions
which will be handled as though they are terms from a sum.
If ``isprimitive`` is True the _gcd_terms will not run the primitive
method on the terms.
``clear`` controls the removal of integers from the denominator of an Add
    expression. When True (default), all numerical denominators will be cleared;
    when False, the denominators will be cleared only if all terms had numerical
denominators other than 1.
``fraction``, when True (default), will put the expression over a common
denominator.
Examples
========
>>> from sympy.core import gcd_terms
>>> from sympy.abc import x, y
>>> gcd_terms((x + 1)**2*y + (x + 1)*y**2)
y*(x + 1)*(x + y + 1)
>>> gcd_terms(x/2 + 1)
(x + 2)/2
>>> gcd_terms(x/2 + 1, clear=False)
x/2 + 1
>>> gcd_terms(x/2 + y/2, clear=False)
(x + y)/2
>>> gcd_terms(x/2 + 1/x)
(x**2 + 2)/(2*x)
>>> gcd_terms(x/2 + 1/x, fraction=False)
(x + 2/x)/2
>>> gcd_terms(x/2 + 1/x, fraction=False, clear=False)
x/2 + 1/x
>>> gcd_terms(x/2/y + 1/x/y)
(x**2 + 2)/(2*x*y)
>>> gcd_terms(x/2/y + 1/x/y, fraction=False, clear=False)
(x + 2/x)/(2*y)
The ``clear`` flag was ignored in this case because the returned
expression was a rational expression, not a simple sum.
See Also
========
factor_terms, sympy.polys.polytools.terms_gcd
"""
def mask(terms):
"""replace nc portions of each term with a unique Dummy symbols
and return the replacements to restore them"""
args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms]
reps = []
for i, (c, nc) in enumerate(args):
if nc:
nc = Mul._from_args(nc)
d = Dummy()
reps.append((d, nc))
c.append(d)
args[i] = Mul._from_args(c)
else:
args[i] = c
return args, dict(reps)
isadd = isinstance(terms, Add)
addlike = isadd or not isinstance(terms, Basic) and \
is_sequence(terms, include=set) and \
not isinstance(terms, Dict)
if addlike:
if isadd: # i.e. an Add
terms = list(terms.args)
else:
terms = sympify(terms)
terms, reps = mask(terms)
cont, numer, denom = _gcd_terms(terms, isprimitive, fraction)
numer = numer.xreplace(reps)
coeff, factors = cont.as_coeff_Mul()
return _keep_coeff(coeff, factors*numer/denom, clear=clear)
if not isinstance(terms, Basic):
return terms
if terms.is_Atom:
return terms
if terms.is_Mul:
c, args = terms.as_coeff_mul()
return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear, fraction)
for i in args]), clear=clear)
def handle(a):
# don't treat internal args like terms of an Add
if not isinstance(a, Expr):
if isinstance(a, Basic):
return a.func(*[handle(i) for i in a.args])
return type(a)([handle(i) for i in a])
return gcd_terms(a, isprimitive, clear, fraction)
if isinstance(terms, Dict):
return Dict(*[(k, handle(v)) for k, v in terms.args])
return terms.func(*[handle(i) for i in terms.args])
def factor_terms(expr, radical=False, clear=False, fraction=False, sign=True):
"""Remove common factors from terms in all arguments without
changing the underlying structure of the expr. No expansion or
simplification (and no processing of non-commutatives) is performed.
If radical=True then a radical common to all terms will be factored
out of any Add sub-expressions of the expr.
If clear=False (default) then coefficients will not be separated
from a single Add if they can be distributed to leave one or more
terms with integer coefficients.
If fraction=True (default is False) then a common denominator will be
constructed for the expression.
If sign=True (default) then even if the only factor in common is a -1,
it will be factored out of the expression.
Examples
========
>>> from sympy import factor_terms, Symbol
>>> from sympy.abc import x, y
>>> factor_terms(x + x*(2 + 4*y)**3)
x*(8*(2*y + 1)**3 + 1)
>>> A = Symbol('A', commutative=False)
>>> factor_terms(x*A + x*A + x*y*A)
x*(y*A + 2*A)
When ``clear`` is False, a rational will only be factored out of an
Add expression if all terms of the Add have coefficients that are
fractions:
>>> factor_terms(x/2 + 1, clear=False)
x/2 + 1
>>> factor_terms(x/2 + 1, clear=True)
(x + 2)/2
This only applies when there is a single Add that the coefficient
multiplies:
>>> factor_terms(x*y/2 + y, clear=True)
y*(x + 2)/2
>>> factor_terms(x*y/2 + y, clear=False) == _
True
If a -1 is all that can be factored out, to *not* factor it out, the
flag ``sign`` must be False:
>>> factor_terms(-x - y)
-(x + y)
>>> factor_terms(-x - y, sign=False)
-x - y
>>> factor_terms(-2*x - 2*y, sign=False)
-2*(x + y)
See Also
========
gcd_terms, sympy.polys.polytools.terms_gcd
"""
from sympy.simplify.simplify import bottom_up
def do(expr):
is_iterable = iterable(expr)
if not isinstance(expr, Basic) or expr.is_Atom:
if is_iterable:
return type(expr)([do(i) for i in expr])
return expr
if expr.is_Pow or expr.is_Function or \
is_iterable or not hasattr(expr, 'args_cnc'):
args = expr.args
newargs = tuple([do(i) for i in args])
if newargs == args:
return expr
return expr.func(*newargs)
cont, p = expr.as_content_primitive(radical=radical)
if p.is_Add:
list_args = [do(a) for a in Add.make_args(p)]
# get a common negative (if there) which gcd_terms does not remove
if all(a.as_coeff_Mul()[0] < 0 for a in list_args):
cont = -cont
list_args = [-a for a in list_args]
# watch out for exp(-(x+2)) which gcd_terms will change to exp(-x-2)
special = {}
for i, a in enumerate(list_args):
b, e = a.as_base_exp()
if e.is_Mul and e != Mul(*e.args):
list_args[i] = Dummy()
special[list_args[i]] = a
# rebuild p not worrying about the order which gcd_terms will fix
p = Add._from_args(list_args)
p = gcd_terms(p,
isprimitive=True,
clear=clear,
fraction=fraction).xreplace(special)
elif p.args:
p = p.func(
*[do(a) for a in p.args])
rv = _keep_coeff(cont, p, clear=clear, sign=sign)
return rv
expr = sympify(expr)
return do(expr)
def _mask_nc(eq, name=None):
"""
Return ``eq`` with non-commutative objects replaced with Dummy
symbols. A dictionary that can be used to restore the original
values is returned: if it is None, the expression is noncommutative
and cannot be made commutative. The third value returned is a list
of any non-commutative symbols that appear in the returned equation.
    ``name``, if given, is the name that will be used with numbered Dummy
variables that will replace the non-commutative objects and is mainly
used for doctesting purposes.
Notes
=====
All non-commutative objects other than Symbols are replaced with
a non-commutative Symbol. Identical objects will be identified
by identical symbols.
If there is only 1 non-commutative object in an expression it will
be replaced with a commutative symbol. Otherwise, the non-commutative
entities are retained and the calling routine should handle
replacements in this case since some care must be taken to keep
track of the ordering of symbols when they occur within Muls.
Examples
========
>>> from sympy.physics.secondquant import Commutator, NO, F, Fd
>>> from sympy import symbols, Mul
>>> from sympy.core.exprtools import _mask_nc
>>> from sympy.abc import x, y
>>> A, B, C = symbols('A,B,C', commutative=False)
One nc-symbol:
>>> _mask_nc(A**2 - x**2, 'd')
(_d0**2 - x**2, {_d0: A}, [])
Multiple nc-symbols:
>>> _mask_nc(A**2 - B**2, 'd')
(A**2 - B**2, None, [A, B])
An nc-object with nc-symbols but no others outside of it:
>>> _mask_nc(1 + x*Commutator(A, B), 'd')
(_d0*x + 1, {_d0: Commutator(A, B)}, [])
>>> _mask_nc(NO(Fd(x)*F(y)), 'd')
(_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, [])
Multiple nc-objects:
>>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B)
>>> _mask_nc(eq, 'd')
(x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1])
Multiple nc-objects and nc-symbols:
>>> eq = A*Commutator(A, B) + B*Commutator(A, C)
>>> _mask_nc(eq, 'd')
(A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B])
If there is an object that:
- doesn't contain nc-symbols
- but has arguments which derive from Basic, not Expr
- and doesn't define an _eval_is_commutative routine
then it will give False (or None?) for the is_commutative test. Such
objects are also removed by this routine:
>>> from sympy import Basic
>>> eq = (1 + Mul(Basic(), Basic(), evaluate=False))
>>> eq.is_commutative
False
>>> _mask_nc(eq, 'd')
(_d0**2 + 1, {_d0: Basic()}, [])
"""
name = name or 'mask'
# Make Dummy() append sequential numbers to the name
def numbered_names():
i = 0
while True:
yield name + str(i)
i += 1
names = numbered_names()
def Dummy(*args, **kwargs):
from sympy import Dummy
return Dummy(next(names), *args, **kwargs)
expr = eq
if expr.is_commutative:
return eq, {}, []
# identify nc-objects; symbols and other
rep = []
nc_obj = set()
nc_syms = set()
pot = preorder_traversal(expr, keys=default_sort_key)
for i, a in enumerate(pot):
if any(a == r[0] for r in rep):
pot.skip()
elif not a.is_commutative:
if a.is_Symbol:
nc_syms.add(a)
elif not (a.is_Add or a.is_Mul or a.is_Pow):
if all(s.is_commutative for s in a.free_symbols):
rep.append((a, Dummy()))
else:
nc_obj.add(a)
pot.skip()
# If there is only one nc symbol or object, it can be factored regularly
# but polys is going to complain, so replace it with a Dummy.
if len(nc_obj) == 1 and not nc_syms:
rep.append((nc_obj.pop(), Dummy()))
elif len(nc_syms) == 1 and not nc_obj:
rep.append((nc_syms.pop(), Dummy()))
# Any remaining nc-objects will be replaced with an nc-Dummy and
# identified as an nc-Symbol to watch out for
nc_obj = sorted(nc_obj, key=default_sort_key)
for n in nc_obj:
nc = Dummy(commutative=False)
rep.append((n, nc))
nc_syms.add(nc)
expr = expr.subs(rep)
nc_syms = list(nc_syms)
nc_syms.sort(key=default_sort_key)
return expr, dict([(v, k) for k, v in rep]) or None, nc_syms
def factor_nc(expr):
"""Return the factored form of ``expr`` while handling non-commutative
expressions.
**examples**
>>> from sympy.core.exprtools import factor_nc
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> B = Symbol('B', commutative=False)
>>> factor_nc((x**2 + 2*A*x + A**2).expand())
(x + A)**2
>>> factor_nc(((x + A)*(x + B)).expand())
(x + A)*(x + B)
"""
from sympy.simplify.simplify import powsimp
from sympy.polys import gcd, factor
def _pemexpand(expr):
"Expand with the minimal set of hints necessary to check the result."
return expr.expand(deep=True, mul=True, power_exp=True,
power_base=False, basic=False, multinomial=True, log=False)
expr = sympify(expr)
if not isinstance(expr, Expr) or not expr.args:
return expr
if not expr.is_Add:
return expr.func(*[factor_nc(a) for a in expr.args])
expr, rep, nc_symbols = _mask_nc(expr)
if rep:
return factor(expr).subs(rep)
else:
args = [a.args_cnc() for a in Add.make_args(expr)]
c = g = l = r = S.One
hit = False
# find any commutative gcd term
for i, a in enumerate(args):
if i == 0:
c = Mul._from_args(a[0])
elif a[0]:
c = gcd(c, Mul._from_args(a[0]))
else:
c = S.One
if c is not S.One:
hit = True
c, g = c.as_coeff_Mul()
if g is not S.One:
for i, (cc, _) in enumerate(args):
cc = list(Mul.make_args(Mul._from_args(list(cc))/g))
args[i][0] = cc
for i, (cc, _) in enumerate(args):
cc[0] = cc[0]/c
args[i][0] = cc
# find any noncommutative common prefix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_prefix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][0].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][0].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
l = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][0] = il*args[i][1][0]
break
if not ok:
break
else:
hit = True
lenn = len(n)
l = Mul(*n)
for i, a in enumerate(args):
args[i][1] = args[i][1][lenn:]
# find any noncommutative common suffix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_suffix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][-1].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][-1].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
r = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][-1] = args[i][1][-1]*il
break
if not ok:
break
else:
hit = True
lenn = len(n)
r = Mul(*n)
for i, a in enumerate(args):
args[i][1] = a[1][:len(a[1]) - lenn]
if hit:
mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args])
else:
mid = expr
# sort the symbols so the Dummys would appear in the same
# order as the original symbols, otherwise you may introduce
    # a factor of -1, e.g. A**2 - B**2 -- {A:y, B:x} --> y**2 - x**2
# and the former factors into two terms, (A - B)*(A + B) while the
# latter factors into 3 terms, (-1)*(x - y)*(x + y)
rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)]
unrep1 = [(v, k) for k, v in rep1]
unrep1.reverse()
new_mid, r2, _ = _mask_nc(mid.subs(rep1))
new_mid = powsimp(factor(new_mid))
new_mid = new_mid.subs(r2).subs(unrep1)
if new_mid.is_Pow:
return _keep_coeff(c, g*l*new_mid*r)
if new_mid.is_Mul:
# XXX TODO there should be a way to inspect what order the terms
# must be in and just select the plausible ordering without
# checking permutations
cfac = []
ncfac = []
for f in new_mid.args:
if f.is_commutative:
cfac.append(f)
else:
b, e = f.as_base_exp()
if e.is_Integer:
ncfac.extend([b]*e)
else:
ncfac.append(f)
pre_mid = g*Mul(*cfac)*l
target = _pemexpand(expr/c)
for s in variations(ncfac, len(ncfac)):
ok = pre_mid*Mul(*s)*r
if _pemexpand(ok) == target:
return _keep_coeff(c, ok)
# mid was an Add that didn't factor successfully
return _keep_coeff(c, g*l*mid*r)
| kmacinnis/sympy | sympy/core/exprtools.py | Python | bsd-3-clause | 41,487 |
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from tvtk.tools.tvtk_doc import TVTKFilterChooser, TVTK_FILTERS
# Local imports.
from mayavi.filters.filter_base import FilterBase
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo
################################################################################
# `UserDefined` class.
################################################################################
class UserDefined(FilterBase):
"""
This filter lets the user define their own filter
dynamically/interactively. It is like `FilterBase` but allows a
user to specify the class without writing any code.
"""
# The version of this class. Used for persistence.
__version__ = 0
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
######################################################################
# `object` interface.
######################################################################
def __set_pure_state__(self, state):
# Create and set the filter.
children = [f for f in [self.filter] if f is not None]
handle_children_state(children, [state.filter])
self.filter = children[0]
self.update_pipeline()
# Restore our state.
super(UserDefined, self).__set_pure_state__(state)
######################################################################
# `UserDefined` interface.
######################################################################
def setup_filter(self):
"""Setup the filter if none has been set or check it if it
already has been."""
obj = self.filter
if not self._check_object(obj):
if obj is not None:
cname = obj.__class__.__name__
error('Invalid filter %s chosen! Try again!'%cname)
obj = self._choose_filter()
self.filter = obj
######################################################################
# Non-public interface.
######################################################################
def _choose_filter(self):
chooser = TVTKFilterChooser()
chooser.edit_traits(kind='livemodal')
obj = chooser.object
if obj is None:
error('Invalid filter chosen! Try again!')
return obj
def _check_object(self, obj):
if obj is None:
return False
if obj.__class__.__name__ in TVTK_FILTERS:
return True
return False
def _filter_changed(self, old, new):
self.name = 'UserDefined:%s'%new.__class__.__name__
super(UserDefined, self)._filter_changed(old, new)
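# Illustrative usage sketch (not part of the original mayavi source): a
# UserDefined filter wraps an arbitrary TVTK filter chosen at runtime. The
# pipeline calls below are assumptions shown only for illustration; mayavi also
# exposes a convenience wrapper with the same idea, mlab.pipeline.user_defined.
#
#   # from tvtk.api import tvtk
#   # ud = UserDefined(filter=tvtk.CellDerivatives())
#   # engine.add_filter(ud, src)   # hypothetical engine and source objects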
| dmsurti/mayavi | mayavi/filters/user_defined.py | Python | bsd-3-clause | 3,082 |
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
    smallest value of the scores' dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
    See ``scipy.stats.f_oneway``, which should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = stats.fprob(dfbn, dfwn, f)
return f, prob
def f_classif(X, y):
"""Compute the Anova F-value for the provided sample
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will be tested sequentially.
    y : array of shape (n_samples,)
        The target vector (class labels).
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
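# Illustrative usage sketch (not part of the original scikit-learn source):
# f_classif scores every column of X against the class labels y with a one-way
# ANOVA; a larger F value means the feature separates the classes better. The
# tiny dataset below is made up purely for illustration.
#
#   >>> import numpy as np
#   >>> X = np.array([[1., 10.], [2., 20.], [1., 30.], [2., 40.]])
#   >>> y = np.array([0, 0, 1, 1])
#   >>> F, pval = f_classif(X, y)   # F.shape == pval.shape == (2,)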
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared statistic for each class/feature combination.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain booleans or frequencies (e.g., term counts in document
classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = check_array(X.sum(axis=0))
class_prob = check_array(Y.mean(axis=0))
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
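# Illustrative usage sketch (not part of the original scikit-learn source):
# chi2 expects non-negative feature values (e.g. term counts) and returns one
# chi-squared statistic and one p-value per feature. The counts below are made
# up for illustration only.
#
#   >>> import numpy as np
#   >>> X = np.array([[1, 0, 3], [0, 2, 1], [4, 0, 0], [0, 3, 2]])
#   >>> y = np.array([0, 1, 0, 1])
#   >>> scores, pvalues = chi2(X, y)   # both have shape (3,)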
def f_regression(X, y, center=True):
"""Univariate linear regression tests
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 3 steps:
1. the regressor of interest and the data are orthogonalized
wrt constant regressors
2. the cross correlation between data and regressors is computed
3. it is converted to an F score then to a p-value
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
        The set of regressors that will be tested sequentially.
    y : array of shape (n_samples,)
        The target vector.
    center : bool, default=True
        If True, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
"""
if issparse(X) and center:
raise ValueError("center=True only allowed for dense data")
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float)
if center:
y = y - np.mean(y)
X = X.copy('F') # faster in fortran
X -= X.mean(axis=0)
# compute the correlation
corr = safe_sparse_dot(y, X)
# XXX could use corr /= row_norms(X.T) here, but the test doesn't pass
corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
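# Illustrative usage sketch (not part of the original scikit-learn source):
# f_regression scores each column of X by its correlation with a continuous
# target y, converted to an F statistic and p-value. Synthetic data, for
# illustration only.
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(50, 3)
#   >>> y = 2 * X[:, 0] + 0.1 * rng.randn(50)   # only column 0 is informative
#   >>> F, pval = f_regression(X, y)            # F[0] should dominate F[1:]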
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
self.scores_, self.pvalues_ = self.score_func(X, y)
self.scores_ = np.asarray(self.scores_)
self.pvalues_ = np.asarray(self.pvalues_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=np.bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=np.bool)
scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = len(scores) * self.percentile // 100
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
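# Illustrative usage sketch (not part of the original scikit-learn source):
# SelectPercentile keeps the top-scoring fraction of features; with
# percentile=30 and 10 columns, 3 columns survive. Synthetic data, assuming the
# usual fit/transform estimator API.
#
#   >>> import numpy as np
#   >>> X = np.abs(np.random.randn(20, 10))
#   >>> y = np.random.randint(0, 2, size=20)
#   >>> X_new = SelectPercentile(f_classif, percentile=30).fit_transform(X, y)
#   >>> X_new.shape   # -> (20, 3)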
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
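# Illustrative usage sketch (not part of the original scikit-learn source):
# SelectKBest keeps the k highest-scoring features; k='all' disables selection,
# which is convenient inside a parameter search. Synthetic data, for
# illustration only.
#
#   >>> import numpy as np
#   >>> X = np.abs(np.random.randn(30, 8))
#   >>> y = np.random.randint(0, 2, size=30)
#   >>> selector = SelectKBest(chi2, k=2).fit(X, y)
#   >>> selector.transform(X).shape   # -> (30, 2)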
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
    number of false detections.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is the target false
discovery rate.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
        alpha = self.alpha
        n_features = len(self.pvalues_)
        sv = np.sort(self.pvalues_)
        # Benjamini-Hochberg: keep p-values up to the largest sorted p_(i) with
        # p_(i) <= alpha * i / n_features, where i is the 1-based rank
        selected = sv[sv <= float(alpha) / n_features
                      * np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
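# Illustrative usage sketch (not part of the original scikit-learn source):
# GenericUnivariateSelect dispatches to one of the concrete selectors above
# based on ``mode``: mode='k_best' with param=5 behaves like SelectKBest(k=5),
# and mode='fpr' with param=0.05 like SelectFpr(alpha=0.05).
#
#   >>> transformer = GenericUnivariateSelect(f_classif, mode='k_best', param=5)
#   >>> # transformer.fit_transform(X, y) would keep the 5 best-scoring columns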
| loli/semisupervisedforests | sklearn/feature_selection/univariate_selection.py | Python | bsd-3-clause | 18,609 |
# coding=utf-8
from __future__ import absolute_import
from .base import *
# ######### IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
},
} | manazag/hopper.pw | hopperpw/hopperpw/settings/test.py | Python | bsd-3-clause | 229 |
VERSION = (1, 0, 0,)
__version__ = '.'.join(map(str, VERSION))
default_app_config = 'admin_sso.apps.AdminSSOConfig'
# Do not use Django settings at module level as recommended
try:
from django.utils.functional import LazyObject
except ImportError:
pass
else:
class LazySettings(LazyObject):
def _setup(self):
from admin_sso import default_settings
self._wrapped = Settings(default_settings)
class Settings(object):
def __init__(self, settings_module):
for setting in dir(settings_module):
if setting == setting.upper():
setattr(self, setting, getattr(settings_module, setting))
settings = LazySettings()
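# Illustrative note (not part of the original django-admin-sso source): the
# LazySettings object above defers reading admin_sso.default_settings until the
# first attribute access, mirroring django.conf.settings. Hypothetical usage:
#
#   # from admin_sso import settings
#   # settings.SOME_UPPERCASE_SETTING   # placeholder name, resolved lazily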
| frog32/django-admin-sso | admin_sso/__init__.py | Python | bsd-3-clause | 716 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import versioneer
__author__ = 'Chia-Jung, Yang'
__email__ = '[email protected]'
__version__ = versioneer.get_version()
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| jeroyang/newsletter | newsletter/__init__.py | Python | bsd-3-clause | 394 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis as vv
import numpy as np
import os
# Try importing imageio
imageio = None
try:
import imageio
except ImportError:
pass
def volread(filename):
""" volread(filename)
Read volume from a file. If filename is 'stent', read a dedicated
test dataset. For reading any other kind of volume, the imageio
package is required.
"""
if filename == 'stent':
# Get full filename
path = vv.misc.getResourceDir()
filename2 = os.path.join(path, 'stent_vol.ssdf')
if os.path.isfile(filename2):
filename = filename2
else:
raise IOError("File '%s' does not exist." % filename)
# Load
s = vv.ssdf.load(filename)
return s.vol.astype('int16') * s.colorscale
elif imageio is not None:
return imageio.volread(filename)
else:
raise RuntimeError("visvis.volread needs the imageio package to read arbitrary files.")
if __name__ == '__main__':
vol = vv.volread('stent')
t = vv.volshow(vol)
t.renderStyle = 'mip' # maximum intensity projection (is the default)
| chrisidefix/visvis | functions/volread.py | Python | bsd-3-clause | 1,322 |
""" test positional based indexing with iloc """
from datetime import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
DataFrame,
Index,
Interval,
NaT,
Series,
array,
concat,
date_range,
isna,
)
import pandas._testing as tm
from pandas.api.types import is_scalar
from pandas.core.indexing import IndexingError
from pandas.tests.indexing.common import Base
# We pass through the error message from numpy
_slice_iloc_msg = re.escape(
"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean arrays are valid indices"
)
class TestiLoc(Base):
@pytest.mark.parametrize("key", [2, -1, [0, 1, 2]])
def test_iloc_getitem_int_and_list_int(self, key):
self.check_result(
"iloc",
key,
typs=["labels", "mixed", "ts", "floats", "empty"],
fails=IndexError,
)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
class TestiLocBaseIndependent:
"""Tests Independent Of Base Class"""
@pytest.mark.parametrize(
"key",
[
slice(None),
slice(3),
range(3),
[0, 1, 2],
Index(range(3)),
np.asarray([0, 1, 2]),
],
)
@pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
def test_iloc_setitem_fullcol_categorical(self, indexer, key, using_array_manager):
frame = DataFrame({0: range(3)}, dtype=object)
cat = Categorical(["alpha", "beta", "gamma"])
if not using_array_manager:
assert frame._mgr.blocks[0]._can_hold_element(cat)
df = frame.copy()
orig_vals = df.values
indexer(df)[key, 0] = cat
overwrite = isinstance(key, slice) and key == slice(None)
if overwrite or using_array_manager:
# TODO(ArrayManager) we always overwrite because ArrayManager takes
# the "split" path, which still overwrites
# TODO: GH#39986 this probably shouldn't behave differently
expected = DataFrame({0: cat})
assert not np.shares_memory(df.values, orig_vals)
else:
expected = DataFrame({0: cat}).astype(object)
if not using_array_manager:
assert np.shares_memory(df[0].values, orig_vals)
tm.assert_frame_equal(df, expected)
        # check we don't have a view on cat (may be undesired GH#39986)
        df.iloc[0, 0] = "gamma"
        # both the overwrite and non-overwrite paths leave cat unchanged
        assert cat[0] != "gamma"
# TODO with mixed dataframe ("split" path), we always overwrite the column
frame = DataFrame({0: np.array([0, 1, 2], dtype=object), 1: range(3)})
df = frame.copy()
orig_vals = df.values
indexer(df)[key, 0] = cat
expected = DataFrame({0: cat, 1: range(3)})
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) does not yet update parent
@td.skip_array_manager_not_yet_implemented
@pytest.mark.parametrize("box", [array, Series])
def test_iloc_setitem_ea_inplace(self, frame_or_series, box, using_array_manager):
# GH#38952 Case with not setting a full column
# IntegerArray without NAs
arr = array([1, 2, 3, 4])
obj = frame_or_series(arr.to_numpy("i8"))
if frame_or_series is Series or not using_array_manager:
values = obj.values
else:
values = obj[0].values
if frame_or_series is Series:
obj.iloc[:2] = box(arr[2:])
else:
obj.iloc[:2, 0] = box(arr[2:])
expected = frame_or_series(np.array([3, 4, 3, 4], dtype="i8"))
tm.assert_equal(obj, expected)
# Check that we are actually in-place
if frame_or_series is Series:
assert obj.values is values
else:
if using_array_manager:
assert obj[0].values is values
else:
assert obj.values.base is values.base and values.base is not None
def test_is_scalar_access(self):
# GH#32085 index with duplicates doesn't matter for _is_scalar_access
index = Index([1, 2, 1])
ser = Series(range(3), index=index)
assert ser.iloc._is_scalar_access((1,))
df = ser.to_frame()
assert df.iloc._is_scalar_access((1, 0))
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list("ABCDE"))
# lists of positions should raise IndexError!
msg = "positional indexers are out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
with pytest.raises(IndexError, match=msg):
df.iloc[[1, 30]]
with pytest.raises(IndexError, match=msg):
df.iloc[[1, -30]]
with pytest.raises(IndexError, match=msg):
df.iloc[[100]]
s = df["A"]
with pytest.raises(IndexError, match=msg):
s.iloc[[100]]
with pytest.raises(IndexError, match=msg):
s.iloc[[-100]]
# still raise on a single indexer
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[30]
with pytest.raises(IndexError, match=msg):
df.iloc[-30]
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with pytest.raises(IndexError, match=msg):
s.iloc[30]
with pytest.raises(IndexError, match=msg):
s.iloc[-30]
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list("AB"))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
msg = "positional indexers are out-of-bounds"
with pytest.raises(IndexError, match=msg):
dfl.iloc[[4, 5, 6]]
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
dfl.iloc[:, 4]
@pytest.mark.parametrize("index,columns", [(np.arange(20), list("ABCDE"))])
@pytest.mark.parametrize(
"index_vals,column_vals",
[
([slice(None), ["A", "D"]]),
(["1", "2"], slice(None)),
([datetime(2019, 1, 1)], slice(None)),
],
)
def test_iloc_non_integer_raises(self, index, columns, index_vals, column_vals):
# GH 25753
df = DataFrame(
np.random.randn(len(index), len(columns)), index=index, columns=columns
)
msg = ".iloc requires numeric indexers, got"
with pytest.raises(IndexError, match=msg):
df.iloc[index_vals, column_vals]
@pytest.mark.parametrize("dims", [1, 2])
def test_iloc_getitem_invalid_scalar(self, dims):
# GH 21982
if dims == 1:
s = Series(np.arange(10))
else:
s = DataFrame(np.arange(100).reshape(10, 10))
with pytest.raises(TypeError, match="Cannot index by location index"):
s.iloc["a"]
def test_iloc_array_not_mutating_negative_indices(self):
# GH 21867
array_with_neg_numbers = np.array([1, 2, -1])
array_copy = array_with_neg_numbers.copy()
df = DataFrame(
{"A": [100, 101, 102], "B": [103, 104, 105], "C": [106, 107, 108]},
index=[1, 2, 3],
)
df.iloc[array_with_neg_numbers]
tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy)
df.iloc[:, array_with_neg_numbers]
tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({"A": [2, 3, 5], "B": [7, 11, 13]})
s = df["A"]
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
assert result == expected
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = Series(["a"], index=["A"])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# GH 6766
df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}])
df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
assert isna(result)
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=["A", "B", "A", "B"], name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
df = DataFrame(
[
{"A": 1, "B": 2, "C": 3},
{"A": 100, "B": 200, "C": 300},
{"A": 1000, "B": 2000, "C": 3000},
]
)
expected = DataFrame([{"A": 1, "B": 2, "C": 3}])
tm.assert_frame_equal(df.iloc[[0]], expected)
expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
tm.assert_frame_equal(df.iloc[[0, 1]], expected)
expected = DataFrame([{"B": 2, "C": 3}, {"B": 2000, "C": 3000}], index=[0, 2])
result = df.iloc[[0, 2], [1, 2]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_bool(self):
df = DataFrame(
[
{"A": 1, "B": 2, "C": 3},
{"A": 100, "B": 200, "C": 300},
{"A": 1000, "B": 2000, "C": 3000},
]
)
expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
result = df.iloc[[True, True, False]]
tm.assert_frame_equal(result, expected)
expected = DataFrame(
[{"A": 1, "B": 2, "C": 3}, {"A": 1000, "B": 2000, "C": 3000}], index=[0, 2]
)
result = df.iloc[lambda x: x.index % 2 == 0]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index", [[True, False], [True, False, True, False]])
def test_iloc_getitem_bool_diff_len(self, index):
# GH26658
s = Series([1, 2, 3])
msg = f"Boolean index has wrong length: {len(index)} instead of {len(s)}"
with pytest.raises(IndexError, match=msg):
s.iloc[index]
def test_iloc_getitem_slice(self):
df = DataFrame(
[
{"A": 1, "B": 2, "C": 3},
{"A": 100, "B": 200, "C": 300},
{"A": 1000, "B": 2000, "C": 3000},
]
)
expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
result = df.iloc[:2]
tm.assert_frame_equal(result, expected)
expected = DataFrame([{"A": 100, "B": 200}], index=[1])
result = df.iloc[1:2, 0:2]
tm.assert_frame_equal(result, expected)
expected = DataFrame(
[{"A": 1, "C": 3}, {"A": 100, "C": 300}, {"A": 1000, "C": 3000}]
)
result = df.iloc[:, lambda df: [0, 2]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df2 = DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
)
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = DataFrame(
np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)
)
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
assert result == 1
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_iloc_setitem_list(self):
# setitem with an iloc list
df = DataFrame(
np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]
)
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(df, expected)
def test_iloc_setitem_pandas_object(self):
# GH 17193
s_orig = Series([0, 1, 2, 3])
expected = Series([0, -1, -2, 3])
s = s_orig.copy()
s.iloc[Series([1, 2])] = [-1, -2]
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.iloc[Index([1, 2])] = [-1, -2]
tm.assert_series_equal(s, expected)
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}])
df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ["B", "B"]
del df["A"]
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True)
tm.assert_frame_equal(df, expected)
def test_iloc_setitem_frame_duplicate_columns_multiple_blocks(
self, using_array_manager
):
# Same as the "assign back to self" check in test_iloc_setitem_dups
# but on a DataFrame with multiple blocks
df = DataFrame([[0, 1], [2, 3]], columns=["B", "B"])
df.iloc[:, 0] = df.iloc[:, 0].astype("f8")
if not using_array_manager:
assert len(df._mgr.blocks) == 2
expected = df.copy()
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# TODO: GH#27620 this test used to compare iloc against ix; check if this
# is redundant with another test comparing iloc against loc
def test_iloc_getitem_frame(self):
df = DataFrame(
np.random.randn(10, 4), index=range(0, 20, 2), columns=range(0, 8, 2)
)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
exp = df.loc[4, 4]
assert result == exp
# slice
result = df.iloc[4:8]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
expected = df.loc[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
expected = df.loc[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
expected = df.loc[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
expected = df.loc[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# dups indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
expected = df.loc[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=range(1, 5), dtype=object)
result = df.iloc[s.index]
expected = df.loc[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(
np.random.randn(10, 4), index=list("abcdefghij"), columns=list("ABCD")
)
result = df.iloc[1, 1]
exp = df.loc["b", "B"]
assert result == exp
result = df.iloc[:, 2:3]
expected = df.loc[:, ["C"]]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc["j", "D"]
assert result == exp
# out-of-bounds exception
msg = "index 5 is out of bounds for axis 0 with size 4"
with pytest.raises(IndexError, match=msg):
df.iloc[10, 5]
# trying to use a label
msg = (
r"Location based indexing can only have \[integer, integer "
r"slice \(START point is INCLUDED, END point is EXCLUDED\), "
r"listlike of integers, boolean array\] types"
)
with pytest.raises(ValueError, match=msg):
df.iloc["j", "D"]
def test_iloc_getitem_doc_issue(self, using_array_manager):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range("20130101", periods=6)
columns = list("ABCD")
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list("aaaa")
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=list("aa"))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
if not using_array_manager:
df._mgr.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5], columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_iloc_setitem_series(self):
df = DataFrame(
np.random.randn(10, 4), index=list("abcdefghij"), columns=list("ABCD")
)
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
assert result == 1
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=range(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
assert result == 1
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(
{"A": np.arange(5, dtype="int64"), "B": np.arange(5, 10, dtype="int64")}
)
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame({"A": [0, 1, 10, 12, 4], "B": [5, 6, 11, 13, 9]})
tm.assert_frame_equal(df, expected)
df = DataFrame(
{"A": ["a", "b", "c", "d", "e"], "B": np.arange(5, 10, dtype="int64")}
)
df.iloc[2:4] = [["x", 11], ["y", 13]]
expected = DataFrame({"A": ["a", "b", "x", "y", "e"], "B": [5, 6, 11, 13, 9]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [[0], slice(None, 1, None), np.array([0])])
@pytest.mark.parametrize("value", [["Z"], np.array(["Z"])])
def test_iloc_setitem_with_scalar_index(self, indexer, value):
# GH #19474
# assigning like "df.iloc[0, [0]] = ['Z']" should be evaluated
# elementwisely, not using "setter('A', ['Z'])".
df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
df.iloc[0, indexer] = value
result = df.iloc[0, 0]
assert is_scalar(result) and result == "Z"
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(list(range(5)), index=list("ABCDE"), columns=["a"])
mask = df.a % 2 == 0
msg = "iLocation based boolean indexing cannot use an indexable as a mask"
with pytest.raises(ValueError, match=msg):
df.iloc[mask]
mask.index = range(len(mask))
msg = "iLocation based boolean indexing on an integer type is not available"
with pytest.raises(NotImplementedError, match=msg):
df.iloc[mask]
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = [bin(num) for num in nums]
df = DataFrame({"locs": locs, "nums": nums}, reps)
expected = {
(None, ""): "0b1100",
(None, ".loc"): "0b1100",
(None, ".iloc"): "0b1100",
("index", ""): "0b11",
("index", ".loc"): "0b11",
("index", ".iloc"): (
"iLocation based boolean indexing cannot use an indexable as a mask"
),
("locs", ""): "Unalignable boolean Series provided as indexer "
"(index of the boolean Series and of the indexed "
"object do not match).",
("locs", ".loc"): "Unalignable boolean Series provided as indexer "
"(index of the boolean Series and of the "
"indexed object do not match).",
("locs", ".iloc"): (
"iLocation based boolean indexing on an "
"integer type is not available"
),
}
# UserWarnings from reindex of a boolean mask
with catch_warnings(record=True):
simplefilter("ignore", UserWarning)
for idx in [None, "index", "locs"]:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ["", ".loc", ".iloc"]:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
answer = str(bin(accessor[mask]["nums"].sum()))
except (ValueError, IndexingError, NotImplementedError) as e:
answer = str(e)
key = (
idx,
method,
)
r = expected.get(key)
if r != answer:
raise AssertionError(
f"[{key}] does not match [{answer}], received [{r}]"
)
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({"A": [0.1] * 3000, "B": [1] * 3000})
idx = np.arange(30) * 99
expected = df.iloc[idx]
df3 = concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({"A": [0.1] * 1000, "B": [1] * 1000})
df2 = concat([df2, 2 * df2, 3 * df2])
with pytest.raises(KeyError, match="not in index"):
df2.loc[idx]
def test_iloc_empty_list_indexer_is_ok(self):
df = tm.makeCustomDataframe(5, 2)
# vertical empty
tm.assert_frame_equal(
df.iloc[:, []],
df.iloc[:, :0],
check_index_type=True,
check_column_type=True,
)
# horizontal empty
tm.assert_frame_equal(
df.iloc[[], :],
df.iloc[:0, :],
check_index_type=True,
check_column_type=True,
)
# horizontal empty
tm.assert_frame_equal(
df.iloc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
def test_identity_slice_returns_new_object(self, using_array_manager):
# GH13873
original_df = DataFrame({"a": [1, 2, 3]})
sliced_df = original_df.iloc[:]
assert sliced_df is not original_df
# should be a shallow copy
original_df["a"] = [4, 4, 4]
if using_array_manager:
# TODO(ArrayManager) verify it is expected that the original didn't change
# setitem is replacing full column, so doesn't update "viewing" dataframe
assert not (sliced_df["a"] == 4).all()
else:
assert (sliced_df["a"] == 4).all()
original_series = Series([1, 2, 3, 4, 5, 6])
sliced_series = original_series.iloc[:]
assert sliced_series is not original_series
# should also be a shallow copy
original_series[:3] = [7, 8, 9]
assert all(sliced_series[:3] == [7, 8, 9])
def test_indexing_zerodim_np_array(self):
# GH24919
df = DataFrame([[1, 2], [3, 4]])
result = df.iloc[np.array(0)]
s = Series([1, 2], name=0)
tm.assert_series_equal(result, s)
def test_series_indexing_zerodim_np_array(self):
# GH24919
s = Series([1, 2])
result = s.iloc[np.array(0)]
assert result == 1
@pytest.mark.xfail(reason="https://github.com/pandas-dev/pandas/issues/33457")
def test_iloc_setitem_categorical_updates_inplace(self):
# Mixed dtype ensures we go through take_split_path in setitem_with_indexer
cat = Categorical(["A", "B", "C"])
df = DataFrame({1: cat, 2: [1, 2, 3]})
# This should modify our original values in-place
df.iloc[:, 0] = cat[::-1]
expected = Categorical(["C", "B", "A"])
tm.assert_categorical_equal(cat, expected)
def test_iloc_with_boolean_operation(self):
# GH 20627
result = DataFrame([[0, 1], [2, 3], [4, 5], [6, np.nan]])
result.iloc[result.index <= 2] *= 2
expected = DataFrame([[0, 2], [4, 6], [8, 10], [6, np.nan]])
tm.assert_frame_equal(result, expected)
result.iloc[result.index > 2] *= 2
expected = DataFrame([[0, 2], [4, 6], [8, 10], [12, np.nan]])
tm.assert_frame_equal(result, expected)
result.iloc[[True, True, False, False]] *= 2
expected = DataFrame([[0, 4], [8, 12], [8, 10], [12, np.nan]])
tm.assert_frame_equal(result, expected)
result.iloc[[False, False, True, True]] /= 2
expected = DataFrame([[0.0, 4.0], [8.0, 12.0], [4.0, 5.0], [6.0, np.nan]])
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_singlerow_slice_categoricaldtype_gives_series(self):
# GH#29521
df = DataFrame({"x": Categorical("a b c d e".split())})
result = df.iloc[0]
raw_cat = Categorical(["a"], categories=["a", "b", "c", "d", "e"])
expected = Series(raw_cat, index=["x"], name=0, dtype="category")
tm.assert_series_equal(result, expected)
def test_iloc_getitem_categorical_values(self):
# GH#14580
# test iloc() on Series with Categorical data
ser = Series([1, 2, 3]).astype("category")
# get slice
result = ser.iloc[0:2]
expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
# get list of indexes
result = ser.iloc[[0, 1]]
expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
# get boolean array
result = ser.iloc[[True, False, False]]
expected = Series([1]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [None, NaT, np.nan])
def test_iloc_setitem_td64_values_cast_na(self, value):
# GH#18586
series = Series([0, 1, 2], dtype="timedelta64[ns]")
series.iloc[0] = value
expected = Series([NaT, 1, 2], dtype="timedelta64[ns]")
tm.assert_series_equal(series, expected)
def test_iloc_setitem_empty_frame_raises_with_3d_ndarray(self):
idx = Index([])
obj = DataFrame(np.random.randn(len(idx), len(idx)), index=idx, columns=idx)
nd3 = np.random.randint(5, size=(2, 2, 2))
msg = f"Cannot set values with ndim > {obj.ndim}"
with pytest.raises(ValueError, match=msg):
obj.iloc[nd3] = 0
@pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
def test_iloc_getitem_read_only_values(self, indexer):
# GH#10043 this is fundamentally a test for iloc, but test loc while
# we're here
rw_array = np.eye(10)
rw_df = DataFrame(rw_array)
ro_array = np.eye(10)
ro_array.setflags(write=False)
ro_df = DataFrame(ro_array)
tm.assert_frame_equal(indexer(rw_df)[[1, 2, 3]], indexer(ro_df)[[1, 2, 3]])
tm.assert_frame_equal(indexer(rw_df)[[1]], indexer(ro_df)[[1]])
tm.assert_series_equal(indexer(rw_df)[1], indexer(ro_df)[1])
tm.assert_frame_equal(indexer(rw_df)[1:3], indexer(ro_df)[1:3])
def test_iloc_getitem_readonly_key(self):
# GH#17192 iloc with read-only array raising TypeError
df = DataFrame({"data": np.ones(100, dtype="float64")})
indices = np.array([1, 3, 6])
indices.flags.writeable = False
result = df.iloc[indices]
expected = df.loc[[1, 3, 6]]
tm.assert_frame_equal(result, expected)
result = df["data"].iloc[indices]
expected = df["data"].loc[[1, 3, 6]]
tm.assert_series_equal(result, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_iloc_assign_series_to_df_cell(self):
# GH 37593
df = DataFrame(columns=["a"], index=[0])
df.iloc[0, 0] = Series([1, 2, 3])
expected = DataFrame({"a": [Series([1, 2, 3])]}, columns=["a"], index=[0])
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("klass", [list, np.array])
def test_iloc_setitem_bool_indexer(self, klass):
# GH#36741
df = DataFrame({"flag": ["x", "y", "z"], "value": [1, 3, 4]})
indexer = klass([True, False, False])
df.iloc[indexer, 1] = df.iloc[indexer, 1] * 2
expected = DataFrame({"flag": ["x", "y", "z"], "value": [2, 3, 4]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [[1], slice(1, 2)])
def test_iloc_setitem_pure_position_based(self, indexer):
# GH#22046
df1 = DataFrame({"a2": [11, 12, 13], "b2": [14, 15, 16]})
df2 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
df2.iloc[:, indexer] = df1.iloc[:, [0]]
expected = DataFrame({"a": [1, 2, 3], "b": [11, 12, 13], "c": [7, 8, 9]})
tm.assert_frame_equal(df2, expected)
def test_iloc_setitem_dictionary_value(self):
# GH#37728
df = DataFrame({"x": [1, 2], "y": [2, 2]})
rhs = {"x": 9, "y": 99}
df.iloc[1] = rhs
expected = DataFrame({"x": [1, 9], "y": [2, 99]})
tm.assert_frame_equal(df, expected)
# GH#38335 same thing, mixed dtypes
df = DataFrame({"x": [1, 2], "y": [2.0, 2.0]})
df.iloc[1] = rhs
expected = DataFrame({"x": [1, 9], "y": [2.0, 99.0]})
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_float_duplicates(self):
df = DataFrame(
np.random.randn(3, 3), index=[0.1, 0.2, 0.2], columns=list("abc")
)
expect = df.iloc[1:]
tm.assert_frame_equal(df.loc[0.2], expect)
expect = df.iloc[1:, 0]
tm.assert_series_equal(df.loc[0.2, "a"], expect)
df.index = [1, 0.2, 0.2]
expect = df.iloc[1:]
tm.assert_frame_equal(df.loc[0.2], expect)
expect = df.iloc[1:, 0]
tm.assert_series_equal(df.loc[0.2, "a"], expect)
df = DataFrame(
np.random.randn(4, 3), index=[1, 0.2, 0.2, 1], columns=list("abc")
)
expect = df.iloc[1:-1]
tm.assert_frame_equal(df.loc[0.2], expect)
expect = df.iloc[1:-1, 0]
tm.assert_series_equal(df.loc[0.2, "a"], expect)
df.index = [0.1, 0.2, 2, 0.2]
expect = df.iloc[[1, -1]]
tm.assert_frame_equal(df.loc[0.2], expect)
expect = df.iloc[[1, -1], 0]
tm.assert_series_equal(df.loc[0.2, "a"], expect)
def test_iloc_setitem_custom_object(self):
# iloc with an object
class TO:
def __init__(self, value):
self.value = value
def __str__(self) -> str:
return f"[{self.value}]"
__repr__ = __str__
def __eq__(self, other) -> bool:
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = TO(1)
df.iloc[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = TO(1)
df.iloc[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_getitem_with_duplicates(self):
df = DataFrame(np.random.rand(3, 3), columns=list("ABC"), index=list("aab"))
result = df.iloc[0]
assert isinstance(result, Series)
tm.assert_almost_equal(result.values, df.values[0])
result = df.T.iloc[:, 0]
assert isinstance(result, Series)
tm.assert_almost_equal(result.values, df.values[0])
def test_iloc_getitem_with_duplicates2(self):
# GH#2259
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
result = df.iloc[:, [0]]
expected = df.take([0], axis=1)
tm.assert_frame_equal(result, expected)
def test_iloc_interval(self):
# GH#17130
df = DataFrame({Interval(1, 2): [1, 2]})
result = df.iloc[0]
expected = Series({Interval(1, 2): 1}, name=0)
tm.assert_series_equal(result, expected)
result = df.iloc[:, 0]
expected = Series([1, 2], name=Interval(1, 2))
tm.assert_series_equal(result, expected)
result = df.copy()
result.iloc[:, 0] += 1
expected = DataFrame({Interval(1, 2): [2, 3]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("indexing_func", [list, np.array])
@pytest.mark.parametrize("rhs_func", [list, np.array])
def test_loc_setitem_boolean_list(self, rhs_func, indexing_func):
# GH#20438 testing specifically list key, not arraylike
ser = Series([0, 1, 2])
ser.iloc[indexing_func([True, False, True])] = rhs_func([5, 10])
expected = Series([5, 1, 10])
tm.assert_series_equal(ser, expected)
df = DataFrame({"a": [0, 1, 2]})
df.iloc[indexing_func([True, False, True])] = rhs_func([[5], [10]])
expected = DataFrame({"a": [5, 1, 10]})
tm.assert_frame_equal(df, expected)
class TestILocErrors:
# NB: this test should work for _any_ Series we can pass as
# series_with_simple_index
def test_iloc_float_raises(self, series_with_simple_index, frame_or_series):
# GH#4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
# this duplicates the code below
# but is specifically testing for the error
# message
obj = series_with_simple_index
if frame_or_series is DataFrame:
obj = obj.to_frame()
msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
obj.iloc[3.0]
with pytest.raises(IndexError, match=_slice_iloc_msg):
obj.iloc[3.0] = 0
def test_iloc_getitem_setitem_fancy_exceptions(self, float_frame):
with pytest.raises(IndexingError, match="Too many indexers"):
float_frame.iloc[:, :, :]
with pytest.raises(IndexError, match="too many indices for array"):
# GH#32257 we let numpy do validation, get their exception
float_frame.iloc[:, :, :] = 1
# TODO(ArrayManager) "split" path doesn't properly implement DataFrame indexer
@td.skip_array_manager_not_yet_implemented
def test_iloc_frame_indexer(self):
# GH#39004
df = DataFrame({"a": [1, 2, 3]})
indexer = DataFrame({"a": [True, False, True]})
with tm.assert_produces_warning(FutureWarning):
df.iloc[indexer] = 1
msg = (
"DataFrame indexer is not allowed for .iloc\n"
"Consider using .loc for automatic alignment."
)
with pytest.raises(IndexError, match=msg):
df.iloc[indexer]
class TestILocSetItemDuplicateColumns:
def test_iloc_setitem_scalar_duplicate_columns(self):
# GH#15686, duplicate columns and mixed dtype
df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}])
df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}])
df = concat([df1, df2], axis=1)
df.iloc[0, 0] = -1
assert df.iloc[0, 0] == -1
assert df.iloc[0, 2] == 3
assert df.dtypes.iloc[2] == np.int64
def test_iloc_setitem_list_duplicate_columns(self):
# GH#22036 setting with same-sized list
df = DataFrame([[0, "str", "str2"]], columns=["a", "b", "b"])
df.iloc[:, 2] = ["str3"]
expected = DataFrame([[0, "str", "str3"]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
def test_iloc_setitem_series_duplicate_columns(self):
df = DataFrame(
np.arange(8, dtype=np.int64).reshape(2, 4), columns=["A", "B", "A", "B"]
)
df.iloc[:, 0] = df.iloc[:, 0].astype(np.float64)
assert df.dtypes.iloc[2] == np.int64
@pytest.mark.parametrize(
["dtypes", "init_value", "expected_value"],
[("int64", "0", 0), ("float", "1.2", 1.2)],
)
def test_iloc_setitem_dtypes_duplicate_columns(
self, dtypes, init_value, expected_value
):
# GH#22035
df = DataFrame([[init_value, "str", "str2"]], columns=["a", "b", "b"])
df.iloc[:, 0] = df.iloc[:, 0].astype(dtypes)
expected_df = DataFrame(
[[expected_value, "str", "str2"]], columns=["a", "b", "b"]
)
tm.assert_frame_equal(df, expected_df)
class TestILocCallable:
def test_frame_iloc_getitem_callable(self):
# GH#11485
df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
# return location
res = df.iloc[lambda x: [1, 3]]
tm.assert_frame_equal(res, df.iloc[[1, 3]])
res = df.iloc[lambda x: [1, 3], :]
tm.assert_frame_equal(res, df.iloc[[1, 3], :])
res = df.iloc[lambda x: [1, 3], lambda x: 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[lambda x: [1, 3], lambda x: [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
# mixture
res = df.iloc[[1, 3], lambda x: 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[[1, 3], lambda x: [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
res = df.iloc[lambda x: [1, 3], 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[lambda x: [1, 3], [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
def test_frame_iloc_setitem_callable(self):
# GH#11485
df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
# return location
res = df.copy()
res.iloc[lambda x: [1, 3]] = 0
exp = df.copy()
exp.iloc[[1, 3]] = 0
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], :] = -1
exp = df.copy()
exp.iloc[[1, 3], :] = -1
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], lambda x: 0] = 5
exp = df.copy()
exp.iloc[[1, 3], 0] = 5
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], lambda x: [0]] = 25
exp = df.copy()
exp.iloc[[1, 3], [0]] = 25
tm.assert_frame_equal(res, exp)
# mixture
res = df.copy()
res.iloc[[1, 3], lambda x: 0] = -3
exp = df.copy()
exp.iloc[[1, 3], 0] = -3
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[[1, 3], lambda x: [0]] = -5
exp = df.copy()
exp.iloc[[1, 3], [0]] = -5
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], 0] = 10
exp = df.copy()
exp.iloc[[1, 3], 0] = 10
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], [0]] = [-5, -5]
exp = df.copy()
exp.iloc[[1, 3], [0]] = [-5, -5]
tm.assert_frame_equal(res, exp)
class TestILocSeries:
def test_iloc(self):
ser = Series(np.random.randn(10), index=list(range(0, 20, 2)))
for i in range(len(ser)):
result = ser.iloc[i]
exp = ser[ser.index[i]]
tm.assert_almost_equal(result, exp)
# pass a slice
result = ser.iloc[slice(1, 3)]
expected = ser.loc[2:4]
tm.assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
assert (ser[1:3] == 0).all()
# list of integers
result = ser.iloc[[0, 2, 3, 4, 5]]
expected = ser.reindex(ser.index[[0, 2, 3, 4, 5]])
tm.assert_series_equal(result, expected)
def test_iloc_getitem_nonunique(self):
ser = Series([0, 1, 2], index=[0, 1, 0])
assert ser.iloc[2] == 2
def test_iloc_setitem_pure_position_based(self):
# GH#22046
ser1 = Series([1, 2, 3])
ser2 = Series([4, 5, 6], index=[1, 0, 2])
ser1.iloc[1:3] = ser2.iloc[1:3]
expected = Series([1, 5, 6])
tm.assert_series_equal(ser1, expected)
def test_iloc_nullable_int64_size_1_nan(self):
# GH 31861
result = DataFrame({"a": ["test"], "b": [np.nan]})
result.loc[:, "b"] = result.loc[:, "b"].astype("Int64")
expected = DataFrame({"a": ["test"], "b": array([NA], dtype="Int64")})
tm.assert_frame_equal(result, expected)
| rs2/pandas | pandas/tests/indexing/test_iloc.py | Python | bsd-3-clause | 46,354 |
"""
Copyright (c) 2012-2016 Ben Croston
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from RPi._GPIO import *
VERSION = '0.6.3'
| anatolieGhebea/contatore | python/RPi.GPIO-0.6.3/RPi/GPIO/__init__.py | Python | mit | 1,112 |
from __future__ import print_function
from .patchpipette import PatchPipette
| pbmanis/acq4 | acq4/devices/PatchPipette/__init__.py | Python | mit | 77 |
from io import BytesIO
import tempfile
import os
import time
import shutil
from contextlib import contextmanager
import six
import sys
from netlib import utils, tcp, http
def treader(bytes):
"""
    Construct a tcp.Reader object from bytes.
"""
fp = BytesIO(bytes)
return tcp.Reader(fp)
@contextmanager
def tmpdir(*args, **kwargs):
orig_workdir = os.getcwd()
temp_workdir = tempfile.mkdtemp(*args, **kwargs)
os.chdir(temp_workdir)
yield temp_workdir
os.chdir(orig_workdir)
shutil.rmtree(temp_workdir)
def _check_exception(expected, actual, exc_tb):
if isinstance(expected, six.string_types):
if expected.lower() not in str(actual).lower():
six.reraise(AssertionError, AssertionError(
"Expected %s, but caught %s" % (
repr(expected), repr(actual)
)
), exc_tb)
else:
if not isinstance(actual, expected):
six.reraise(AssertionError, AssertionError(
"Expected %s, but caught %s %s" % (
expected.__name__, actual.__class__.__name__, repr(actual)
)
), exc_tb)
def raises(expected_exception, obj=None, *args, **kwargs):
"""
Assert that a callable raises a specified exception.
    :expected_exception An exception class or a string. If a class, assert
    that an exception of this type is raised. If a string, assert that the
    string occurs in the string representation of the exception, based on a
    case-insensitive match.
    :obj A callable object.
    :args Positional arguments to be passed to the callable.
    :kwargs Keyword arguments to be passed to the callable.
"""
if obj is None:
return RaisesContext(expected_exception)
else:
try:
ret = obj(*args, **kwargs)
except Exception as actual:
_check_exception(expected_exception, actual, sys.exc_info()[2])
else:
raise AssertionError("No exception raised. Return value: {}".format(ret))
class RaisesContext(object):
def __init__(self, expected_exception):
self.expected_exception = expected_exception
def __enter__(self):
return
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
raise AssertionError("No exception raised.")
else:
_check_exception(self.expected_exception, exc_val, exc_tb)
return True
test_data = utils.Data(__name__)
# FIXME: Temporary workaround during repo merge.
test_data.dirname = os.path.join(test_data.dirname, "..", "test", "netlib")
def treq(**kwargs):
"""
Returns:
netlib.http.Request
"""
default = dict(
first_line_format="relative",
method=b"GET",
scheme=b"http",
host=b"address",
port=22,
path=b"/path",
http_version=b"HTTP/1.1",
headers=http.Headers(((b"header", b"qvalue"), (b"content-length", b"7"))),
content=b"content"
)
default.update(kwargs)
return http.Request(**default)
def tresp(**kwargs):
"""
Returns:
netlib.http.Response
"""
default = dict(
http_version=b"HTTP/1.1",
status_code=200,
reason=b"OK",
headers=http.Headers(((b"header-response", b"svalue"), (b"content-length", b"7"))),
content=b"message",
timestamp_start=time.time(),
timestamp_end=time.time(),
)
default.update(kwargs)
return http.Response(**default)
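# Editor's sketch of how the factories above are typically used: any default
# field can be overridden via keyword arguments. Illustration only, not part
# of the original module; the helper is never called on import.
def _tutils_usage_example():
    req = treq(method=b"POST", content=b"data")        # POST request, other fields default
    resp = tresp(status_code=404, reason=b"Not Found")  # 404 response
    return req, resp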
| tdickers/mitmproxy | netlib/tutils.py | Python | mit | 3,536 |
# extension imports
from _NetworKit import PageRankNibble, GCE | fmaschler/networkit | networkit/scd.py | Python | mit | 62 |
"""
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
   Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
    directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behaviour
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
    Provide a customized template for preparing restructured text.
"""
from __future__ import print_function
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap
import traceback
from docutils.parsers.rst import directives
from docutils import nodes
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
# os.path.relpath is new in Python 2.6
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in document.nametypes.iteritems():
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': directives.flag,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect('doctree-read', mark_plot_labels)
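# Editor's note: a minimal sketch of the conf.py side of the options registered
# in setup() above. The option names mirror the add_config_value() calls; the
# concrete values shown are illustrative assumptions, not requirements.
_EXAMPLE_CONF_PY = """
extensions = ['matplotlib.sphinxext.plot_directive']
plot_include_source = True
plot_formats = [('png', 80), ('hires.png', 200), ('pdf', 200)]
plot_html_show_formats = True
plot_rcparams = {'figure.figsize': (6.0, 4.0)}
"""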
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
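# Editor's sketch: what unescape_doctest() does to doctest-style input. The
# sample text is an assumption for illustration; the helper is never called.
def _unescape_doctest_example():
    text = ">>> import numpy as np\n>>> np.arange(3)\narray([0, 1, 2])"
    # contains_doctest(text) is True, so the '>>>' lines become plain code and
    # the expected-output line is preserved as a comment:
    #     "import numpy as np\nnp.arange(3)\n# array([0, 1, 2])\n"
    return unescape_doctest(text)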
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
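# Editor's sketch: split_code_at_show() cuts a script into one piece per
# plt.show() call, so each piece renders its own figure(s). The constant below
# is illustrative only and is not used by the directive machinery.
_EXAMPLE_TWO_FIGURE_SCRIPT = (
    "plt.plot([1, 2])\n"
    "plt.show()\n"
    "plt.plot([3, 4])\n"
    "plt.show()"
)
# split_code_at_show(_EXAMPLE_TWO_FIGURE_SCRIPT) returns two strings, each
# ending with its plt.show() line.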
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
raise OSError(str(err) + '\n`plot_working_directory` option in'
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = cStringIO.StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
exec "import numpy as np\nfrom matplotlib import pyplot as plt\n" in ns
else:
exec setup.config.plot_pre_code in ns
if "__main__" in code:
exec "__name__ = '__main__'" in ns
exec code in ns
if function_name is not None:
exec function_name + "()" in ns
except (Exception, SystemExit), err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def clear_state(plot_rcparams):
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in outdir.
Save the images under *output_dir* with file names derived from
*output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, (str, unicode)):
plot_formats = eval(plot_formats)
for fmt in plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
    # Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams)
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except Exception,err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams)
return results
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = options.has_key('nofigs')
options.setdefault('include-source', config.plot_include_source)
context = options.has_key('context')
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with open(source_file_name, 'r') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if options.has_key('format'):
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir) # no problem here for me, but just use built-ins
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code, source_file_name, build_dir, output_base,
context, function_name, config)
errors = []
except PlotError, err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in options.items()
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats,
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with open(target_name, 'w') as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| Solid-Mechanics/matplotlib-4-abaqus | matplotlib/sphinxext/plot_directive.py | Python | mit | 27,667 |
"""
accounts.test_views
===================
Tests the REST API calls.
Add more specific social registration tests
"""
import responses
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.test.utils import override_settings
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from allauth.account import app_settings
from allauth.socialaccount.models import SocialApp
from allauth.socialaccount.providers.facebook.provider import GRAPH_API_URL
from .serializers import LoginSerializer
class TestAccounts(APITestCase):
""" Tests normal use - non social login. """
def setUp(self):
self.login_url = reverse('accounts:rest_login')
self.logout_url = reverse('accounts:rest_logout')
self.register_url = reverse('accounts:rest_register')
self.password_reset_url = reverse('accounts:rest_password_reset')
self.rest_password_reset_confirm_url = reverse('accounts:rest_password_reset_confirm')
self.password_change_url = reverse('accounts:rest_password_change')
self.verify_url = reverse('accounts:rest_verify_email')
self.user_url = reverse('accounts:rest_user_details')
self.client = APIClient()
self.reusable_user_data = {'username': 'admin', 'email': '[email protected]', 'password': 'password12'}
self.reusable_user_data_change_password = {'username': 'admin', 'email': '[email protected]', 'password': 'password_same'}
self.reusable_register_user_data = {'username': 'admin', 'email': '[email protected]', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data1 = {'username': 'admin1', 'email': '[email protected]', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_no_username = {'email': '[email protected]', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_no_email = {'username': 'admin', 'password1': 'password12', 'password2': 'password12'}
self.change_password_data_incorrect = {"new_password1": "password_not_same", "new_password2": "password_same"}
self.change_password_data = {"new_password1": "password_same", "new_password2": "password_same"}
self.change_password_data_old_password_field_enabled = {"old_password": "password12", "new_password1": "password_same", "new_password2": "password_same"}
def create_user_and_login(self):
""" Helper function to create a basic user, login and assign token credentials. """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK, "Snap! Basic Login has failed with a helper function 'create_user_and_login'. Something is really wrong here.")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + response.data['key'])
def _generate_uid_and_token(self, user):
result = {}
from django.utils.encoding import force_bytes
from django.contrib.auth.tokens import default_token_generator
from django import VERSION
if VERSION[1] == 5:
from django.utils.http import int_to_base36
result['uid'] = int_to_base36(user.pk)
else:
from django.utils.http import urlsafe_base64_encode
result['uid'] = urlsafe_base64_encode(force_bytes(user.pk))
result['token'] = default_token_generator.make_token(user)
return result
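    def _password_reset_confirm_example(self, user, new_password='new_password12'):
        """ Editor's sketch (not an original test): how the uid/token pair from
        _generate_uid_and_token would typically be posted to the confirm endpoint.
        The payload keys are an assumption about the password-reset-confirm
        serializer; adjust if the serializer differs. """
        url_kwargs = self._generate_uid_and_token(user)
        data = {
            'new_password1': new_password,
            'new_password2': new_password,
            'uid': url_kwargs['uid'],
            'token': url_kwargs['token'],
        }
        return self.client.post(self.rest_password_reset_confirm_url, data=data, format='json')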
def cleanUp(self):
pass
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_basic_username_auth_method(self):
""" Tests basic functionality of login with authentication method of username. """
        # Assumes you provide username/password and a token is returned
get_user_model().objects.create_user('admin3', '', 'password12')
data = {"username": 'admin3', "email": "", "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL,
ACCOUNT_EMAIL_REQUIRED=True)
def test_login_basic_email_auth_method(self):
""" Tests basic functionality of login with authentication method of email. """
        # Assumes you provide username/password and a token is returned
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
data = {"username": '', "email": "[email protected]", "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_basic_username_email_auth_method(self):
""" Tests basic functionality of login with authentication method of username or email. """
        # Assumes you provide username/password and a token is returned
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
# Check email
data = {"username": '', "email": "[email protected]", "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
# Check username
data = {"username": 'admin', "email": '', "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_auth_method_username_fail_no_users_in_db(self):
""" Tests login fails with a 400 when no users in db for login auth method of 'username'. """
serializer = LoginSerializer({'username': 'admin', 'password': 'password12'})
response = self.client.post(self.login_url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_login_email_auth_method_fail_no_users_in_db(self):
""" Tests login fails with a 400 when no users in db for login auth method of 'email'. """
serializer = LoginSerializer({'username': 'admin', 'password': 'password12'})
response = self.client.post(self.login_url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_username_email_auth_method_fail_no_users_in_db(self):
""" Tests login fails with a 400 when no users in db for login auth method of 'username_email'. """
serializer = LoginSerializer({'username': 'admin', 'password': 'password12'})
response = self.client.post(self.login_url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def common_test_login_fail_incorrect_change(self):
# Create user, login and try and change password INCORRECTLY
self.create_user_and_login()
self.client.post(self.password_change_url, data=self.change_password_data_incorrect, format='json')
# Remove credentials
self.client.credentials()
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_username_auth_method_fail_incorrect_password_change(self):
""" Tests login fails with an incorrect/invalid password change (login auth username). """
self.common_test_login_fail_incorrect_change()
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_login_email_auth_method_fail_incorrect_password_change(self):
""" Tests login fails with an incorrect/invalid password change (login auth email). """
self.common_test_login_fail_incorrect_change()
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_username_email_auth_method_fail_incorrect_password_change(self):
""" Tests login fails with an incorrect/invalid password change (login auth username_email). """
self.common_test_login_fail_incorrect_change()
def common_test_login_correct_password_change(self):
# Create user, login and try and change password successfully
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=self.change_password_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
# Remove credentials
self.client.credentials()
response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_username_auth_method_correct_password_change(self):
""" Tests login is succesful with a correct password change (login auth username). """
self.common_test_login_correct_password_change()
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_login_email_auth_method_correct_password_change(self):
""" Tests login is succesful with a correct password change (login auth email). """
self.common_test_login_correct_password_change()
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_username_email_auth_method_correct_password_change(self):
""" Tests login is succesful with a correct password change (login auth username_email). """
self.common_test_login_correct_password_change()
def test_login_fail_no_input(self):
""" Tests login fails when you provide no username and no email (login auth username_email). """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
data = {"username": '', "email": '', "password": ''}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_username_auth_method_fail_no_input(self):
""" Tests login fails when you provide no username (login auth username). """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
data = {"username": '', "email": "[email protected]", "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_login_email_auth_method_fail_no_input(self):
""" Tests login fails when you provide no username (login auth email). """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
data = {"username": "admin", "email": '', "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_username_email_auth_method_fail_no_input(self):
""" Tests login fails when you provide no username and no email (login auth username_email). """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
data = {"username": '', "email": '', "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
# need to check for token
# test login with password change
    # test login with wrong password change fails
def test_logout(self):
""" Tests basic logout functionality. """
self.create_user_and_login()
response = self.client.post(self.logout_url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"Successfully logged out."}')
def test_logout_but_already_logged_out(self):
""" Tests logout when already logged out. """
self.create_user_and_login()
response = self.client.post(self.logout_url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"Successfully logged out."}')
self.client.credentials() # remember to remove manual token credential
response = self.client.post(self.logout_url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK, response.content)
self.assertEquals(response.content, '{"success":"Successfully logged out."}')
def test_change_password_basic(self):
""" Tests basic functionality of 'change of password'. """
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=self.change_password_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"New password has been saved."}')
def test_change_password_basic_fails_not_authorised(self):
""" Tests basic functionality of 'change of password' fails if not authorised. """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
response = self.client.post(self.password_change_url, data=self.change_password_data, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEquals(response.content, '{"detail":"Authentication credentials were not provided."}')
def common_change_password_login_fail_with_old_password(self, password_change_data):
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=password_change_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.client.credentials() # Remove credentials
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def common_change_password_login_pass_with_new_password(self, password_change_data):
self.create_user_and_login()
response = self.client.post(self.password_change_url, password_change_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.client.credentials() # Remove credentials
response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
def common_change_password_login_fail_with_old_password_pass_with_new_password(self, password_change_data):
""" Tests change of password with old password fails but new password successes. """
self.create_user_and_login()
response = self.client.post(self.password_change_url, password_change_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK, response.content)
self.client.credentials() # Remove credentials
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK, response.content)
def test_change_password_login_fail_with_old_password(self):
""" Tests change of password with old password. """
self.common_change_password_login_fail_with_old_password(self.change_password_data)
def test_change_password_login_pass_with_new_password(self):
""" Tests change of password with new password. """
self.common_change_password_login_pass_with_new_password(self.change_password_data)
def test_change_password_login_fail_with_old_password_pass_with_new_password(self):
""" Tests change of password with old password fails but new password successes. """
self.common_change_password_login_fail_with_old_password_pass_with_new_password(self.change_password_data)
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_old_password_field_required_old_password_field_enabled(self):
""" Tests basic functionality of 'change of password' fails if old password not given as part of input (old password field enabled). """
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=self.change_password_data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"old_password":["This field is required."]}')
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_basic_old_password_field_enabled(self):
""" Tests basic functionality of 'change of password' (old password enabled). """
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=self.change_password_data_old_password_field_enabled, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"New password has been saved."}')
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_basic_fails_not_authorised_old_password_field_enabled(self):
""" Tests basic functionality of 'change of password' fails if not authorised (old password field enabled). """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
response = self.client.post(self.password_change_url, data=self.change_password_data_old_password_field_enabled, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEquals(response.content, '{"detail":"Authentication credentials were not provided."}')
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_login_fail_with_old_password_old_password_field_enabled(self):
""" Tests change of password with old password (old password field enabled). """
self.common_change_password_login_fail_with_old_password(self.change_password_data_old_password_field_enabled)
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_login_pass_with_new_password_old_password_field_enabled(self):
""" Tests change of password with new password (old password field enabled). """
self.common_change_password_login_pass_with_new_password(self.change_password_data_old_password_field_enabled)
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_login_fail_with_old_password_pass_with_new_password_old_password_field_enabled(self):
""" Tests change of password with old password fails but new password successes (old password field enabled). """
self.common_change_password_login_fail_with_old_password_pass_with_new_password(self.change_password_data_old_password_field_enabled)
"""
    Registration Tests
    ==================
"""
def common_test_registration_basic(self, data):
response = self.client.post(self.register_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_201_CREATED, response.content)
return response
@override_settings(ACCOUNT_EMAIL_REQUIRED=True, ACCOUNT_USERNAME_REQUIRED=True)
def test_registration_basic(self):
""" Tests basic functionality of registration. """
self.common_test_registration_basic(self.reusable_register_user_data)
@override_settings(ACCOUNT_EMAIL_REQUIRED=True, ACCOUNT_USERNAME_REQUIRED=False)
def test_registration_basic_no_username(self):
""" Tests basic functionality of registration (no username required). """
self.common_test_registration_basic(self.reusable_register_user_data_no_username)
@override_settings(ACCOUNT_EMAIL_REQUIRED=False, ACCOUNT_USERNAME_REQUIRED=True)
def test_registration_basic_no_email(self):
""" Tests basic functionality of registration (no username required). """
self.common_test_registration_basic(self.reusable_register_user_data_no_email)
@override_settings(ACCOUNTS_REGISTRATION_OPEN=False)
def test_registration_basic_registration_not_open(self):
""" Tests basic registration fails if registration is closed. """
response = self.client.post(self.register_url, self.reusable_register_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="none")
def test_registration_email_verification_not_necessary(self):
""" Tests you can log in without email verification """
self.common_test_registration_basic(self.reusable_register_user_data)
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="optional")
def test_registration_email_verification_neccessary(self):
""" Tests you can log in without email verification """
self.common_test_registration_basic(self.reusable_register_user_data)
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
def common_test_registration(self):
self.common_test_registration_basic(self.reusable_register_user_data1)
response = self.client.post(self.login_url, {'email': '[email protected]', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def common_test_registration_email_verification_not_necessary_email(self):
self.common_test_registration_basic(self.reusable_register_user_data1)
response = self.client.post(self.login_url, {'email': '[email protected]', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
def common_test_registration_email_verification_not_necessary_username(self):
self.common_test_registration_basic(self.reusable_register_user_data1)
response = self.client.post(self.login_url, {'username': 'admin1', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="none", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_registration_email_verification_neccessary_email(self):
""" Tests you can log in without email verification """
self.common_test_registration_email_verification_not_necessary_email()
@override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_registration_email_verification_neccessary_optional_email(self):
""" Tests you can log in without email verification """
self.common_test_registration_email_verification_not_necessary_email()
@override_settings(ACCOUNT_EMAIL_VERIFICATION="none", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_registration_email_verification_neccessary_username(self):
""" Tests you can log in without email verification """
self.common_test_registration_email_verification_not_necessary_username()
@override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_registration_email_verification_neccessary_optional_username(self):
""" Tests you can log in without email verification """
self.common_test_registration_email_verification_not_necessary_username()
@override_settings(ACCOUNT_EMAIL_VERIFICATION="none", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_registration_email_verification_neccessary_username_email(self):
""" Tests you canT log in without email verification for username & email auth. """
self.common_test_registration_basic(self.reusable_register_user_data1)
response = self.client.post(self.login_url, {'username': 'admin1', 'email': '[email protected]', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_registration_email_verification_neccessary_optional_username_email(self):
""" Tests you canT log in without email verification for username & email auth. """
self.common_test_registration_basic(self.reusable_register_user_data1)
response = self.client.post(self.login_url, {'username': 'admin1', 'email': '[email protected]', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_registration_email_verification_necessary_login_fail_username(self):
""" Tests you can log in without email verification """
self.common_test_registration_basic(self.reusable_register_user_data1)
response = self.client.post(self.login_url, {'username': 'admin1', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_registration_email_verification_necessary_login_fail_email(self):
""" Tests you can log in without email verification """
self.common_test_registration_basic(self.reusable_register_user_data1)
response = self.client.post(self.login_url, {'email': '[email protected]', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_registration_email_verification_necessary_login_fail_username_email(self):
""" Tests you can log in without email verification """
self.common_test_registration_basic({'username': 'admin_man', 'email': '[email protected]', 'password1': 'password12', 'password2': 'password12'})
response = self.client.post(self.login_url, {'username': 'admin_man', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def common_registration_email_verification_neccessary_verified_login(self, login_data):
mail_count = len(mail.outbox)
reg_response = self.common_test_registration_basic(self.reusable_register_user_data1)
self.assertEquals(len(mail.outbox), mail_count + 1)
new_user = get_user_model().objects.latest('id')
login_response = self.client.post(self.login_url, login_data, format='json')
self.assertEquals(login_response.status_code, status.HTTP_400_BAD_REQUEST)
# verify email
email_confirmation = new_user.emailaddress_set.get(email=self.reusable_register_user_data1['email']).emailconfirmation_set.order_by('-created')[0]
verify_response = self.client.post(self.verify_url, {'key': email_confirmation.key}, format='json')
self.assertEquals(verify_response.status_code, status.HTTP_200_OK)
login_response = self.client.post(self.login_url, login_data, format='json')
self.assertEquals(login_response.status_code, status.HTTP_200_OK)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_registration_email_verification_neccessary_verified_login_username(self):
""" Tests you can log in without email verification """
self.common_registration_email_verification_neccessary_verified_login({'username': 'admin1', 'password': 'password12'})
@override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_registration_email_verification_neccessary_verified_login_email(self):
""" Tests you can log in without email verification """
self.common_registration_email_verification_neccessary_verified_login({'email': '[email protected]', 'password': 'password12'})
@override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_registration_email_verification_neccessary_verified_login_username_email(self):
""" Tests you can log in without email verification """
self.common_registration_email_verification_neccessary_verified_login({'username': 'admin1', 'password': 'password12'})
"""
Password Reset Tests
====================
"""
def test_password_reset(self):
""" Test basic functionality of password reset. """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
payload = {'email': '[email protected]'}
response = self.client.post(self.password_reset_url, payload, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}')
@override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=True)
def test_password_reset_fail_no_user_with_email_no_notify_not_in_system(self):
""" Test basic functionality of password reset fails when there is no email on record (notify email not in system). """
payload = {'email': '[email protected]'}
response = self.client.post(self.password_reset_url, payload, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"error":"User with email doesn\'t exist. Did not send reset email."}')
@override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=False)
def test_password_reset_no_user_with_email_no_notify_not_in_system(self):
""" Test basic functionality of password reset fails when there is no email on record. """
payload = {'email': '[email protected]'}
response = self.client.post(self.password_reset_url, payload, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}')
def test_password_reset_confirm_fail_invalid_token(self):
""" Test password reset confirm fails if token is invalid. """
user = get_user_model().objects.create_user('admin', '[email protected]', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_password',
'uid': url_kwargs['uid'],
'token': '-wrong-token-'
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"token":["Invalid value"]}')
def test_password_reset_confirm_fail_invalid_uid(self):
""" Test password reset confirm fails if uid is invalid. """
user = get_user_model().objects.create_user('admin', '[email protected]', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_password',
'uid': 0,
'token': url_kwargs['token']
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"uid":["Invalid value"]}')
def test_password_reset_confirm_fail_passwords_not_the_same(self):
""" Test password reset confirm fails if uid is invalid. """
user = get_user_model().objects.create_user('admin', '[email protected]', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_not_the_same_password',
'uid': url_kwargs['uid'],
'token': url_kwargs['token']
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"new_password2":["The two password fields didn\'t match."]}')
def test_password_reset_confirm_login(self):
""" Tests password reset confirm works -> can login afterwards. """
user = get_user_model().objects.create_user('admin', '[email protected]', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_password',
'uid': url_kwargs['uid'],
'token': url_kwargs['token']
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
response = self.client.post(self.login_url, {'username': 'admin', 'email': '[email protected]', 'password': 'new_password'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
def test_password_reset_confirm_login_fails_with_old_password(self):
""" Tests password reset confirm fails with old password. """
user = get_user_model().objects.create_user('admin', '[email protected]', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_password',
'uid': url_kwargs['uid'],
'token': url_kwargs['token']
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
response = self.client.post(self.login_url, {'username': 'admin', 'email': '[email protected]', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
"""
User Detail Tests
=================
"""
def test_user_details_get(self):
""" Test to retrieve user details. """
self.create_user_and_login()
response = self.client.get(self.user_url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"username":"admin","email":"[email protected]","first_name":"","last_name":""}')
def test_user_details_put(self):
""" Test to put update user details. """
self.create_user_and_login()
response = self.client.put(self.user_url, {"username":"changed","email":"[email protected]","first_name":"changed","last_name":"name"}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"username":"changed","email":"[email protected]","first_name":"changed","last_name":"name"}')
def test_user_details_patch(self):
""" Test to patch update user details. """
self.create_user_and_login()
response = self.client.patch(self.user_url, {'username': 'changed_username', 'email': '[email protected]'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"username":"changed_username","email":"[email protected]","first_name":"","last_name":""}')
def test_user_details_put_not_authenticated(self):
""" Test to put update user details. """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
response = self.client.put(self.user_url, {"username":"changed","email":"[email protected]","first_name":"changed","last_name":"name"}, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_details_patch_not_authenticated(self):
""" Test to patch update user details. """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
response = self.client.patch(self.user_url, {'username': 'changed_username', 'email': '[email protected]'}, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_details_get_not_authenticated(self):
""" Test to retrieve user details. """
get_user_model().objects.create_user('admin', '[email protected]', 'password12')
response = self.client.get(self.user_url, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
class TestAccountsSocial(APITestCase):
""" Tests normal for social login. """
urls = 'accounts.test_social_urls'
def setUp(self):
self.fb_login_url = reverse('fb_login')
social_app = SocialApp.objects.create(
provider='facebook',
name='Facebook',
client_id='123123123',
secret='321321321',
)
site = Site.objects.get_current()
social_app.sites.add(site)
self.graph_api_url = GRAPH_API_URL + '/me'
@responses.activate
def test_social_auth(self):
""" Tests Social Login. """
resp_body = '{"id":"123123123123","first_name":"John","gender":"male","last_name":"Smith","link":"https:\\/\\/www.facebook.com\\/john.smith","locale":"en_US","name":"John Smith","timezone":2,"updated_time":"2014-08-13T10:14:38+0000","username":"john.smith","verified":true}' # noqa
responses.add(
responses.GET,
self.graph_api_url,
body=resp_body,
status=200,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.data)
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
@responses.activate
def test_social_auth_only_one_user_created(self):
""" Tests Social Login. """
resp_body = '{"id":"123123123123","first_name":"John","gender":"male","last_name":"Smith","link":"https:\\/\\/www.facebook.com\\/john.smith","locale":"en_US","name":"John Smith","timezone":2,"updated_time":"2014-08-13T10:14:38+0000","username":"john.smith","verified":true}' # noqa
responses.add(
responses.GET,
self.graph_api_url,
body=resp_body,
status=200,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.data)
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
# make sure that second request will not create a new user
response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.data)
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
@responses.activate
def test_failed_social_auth(self):
# fake response
responses.add(
responses.GET,
self.graph_api_url,
body='',
status=400,
content_type='application/json'
)
response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
| JTarball/docker-django-polymer | docker/app/app/backend/apps/accounts/test_views.py | Python | gpl-2.0 | 42,275 |
"""
This page is in the table of contents.
Plugin to home the tool at beginning of each layer.
The home manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home
==Operation==
The default 'Activate Home' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
===Name of Home File===
Default: home.gcode
At the beginning of each layer, home will add the commands of a gcode script with the name of the "Name of Home File" setting, if one exists. Home does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. Home looks for those files in the alterations folder in the .skeinforge folder in the home directory. If it doesn't find the file, it then looks in the alterations folder in the skeinforge_plugins folder.
==Examples==
The following examples home the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and home.py.
> python home.py
This brings up the home dialog.
> python home.py Screw Holder Bottom.stl
The home tool is parsing the file:
Screw Holder Bottom.stl
..
The home tool has created the file:
.. Screw Holder Bottom_home.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
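# --- Illustrative sketch (not part of the original plugin) -------------------
# The 'Name of Home File' paragraph in the module docstring above describes the
# search order for home.gcode. The helper below is a minimal stand-alone
# approximation of that lookup, added for illustration only; the folder names
# are assumptions and the real resolution is done by
# settings.getAlterationFileLines() later in this file.
def _exampleHomeFileLookup(fileName='home.gcode'):
	'Return the first existing candidate path for the home file, or None.'
	candidates = [
		os.path.join(os.path.expanduser('~'), '.skeinforge', 'alterations', fileName),
		os.path.join('skeinforge_plugins', 'alterations', fileName)]
	for candidate in candidates:
		if os.path.exists(candidate):
			return candidate
	return None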
def getCraftedText( fileName, text, repository = None ):
"Home a gcode linear move file or text."
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText( gcodeText, repository = None ):
"Home a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'home'):
return gcodeText
if repository == None:
repository = settings.getReadRepository( HomeRepository() )
if not repository.activateHome.value:
return gcodeText
return HomeSkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
'Get new repository.'
return HomeRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Home a gcode linear move file. Chain home the gcode if it is not already homed."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'home', shouldAnalyze)
class HomeRepository:
"A class to handle the home settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.home.html', self)
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Home', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home')
self.activateHome = settings.BooleanSetting().getFromValue('Activate Home', self, True )
self.nameOfHomeFile = settings.StringSetting().getFromValue('Name of Home File:', self, 'home.gcode')
self.executeTitle = 'Home'
def execute(self):
"Home button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class HomeSkein:
"A class to home a skein of extrusions."
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.extruderActive = False
self.highestZ = None
self.homeLines = []
self.layerCount = settings.LayerCount()
self.lineIndex = 0
self.lines = None
self.oldLocation = None
self.shouldHome = False
self.travelFeedRateMinute = 957.0
def addFloat( self, begin, end ):
"Add dive to the original height."
beginEndDistance = begin.distance(end)
alongWay = self.absolutePerimeterWidth / beginEndDistance
closeToEnd = euclidean.getIntermediateLocation( alongWay, end, begin )
closeToEnd.z = self.highestZ
self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, closeToEnd.dropAxis(), closeToEnd.z ) )
def addHomeTravel( self, splitLine ):
"Add the home travel gcode."
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.highestZ = max( self.highestZ, location.z )
if not self.shouldHome:
return
self.shouldHome = False
if self.oldLocation == None:
return
if self.extruderActive:
self.distanceFeedRate.addLine('M103')
self.addHopUp( self.oldLocation )
self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.homeLines)
self.addHopUp( self.oldLocation )
self.addFloat( self.oldLocation, location )
if self.extruderActive:
self.distanceFeedRate.addLine('M101')
def addHopUp(self, location):
"Add hop to highest point."
locationUp = Vector3( location.x, location.y, self.highestZ )
self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, locationUp.dropAxis(), locationUp.z ) )
def getCraftedGcode( self, gcodeText, repository ):
"Parse gcode text and store the home gcode."
self.repository = repository
self.homeLines = settings.getAlterationFileLines(repository.nameOfHomeFile.value)
if len(self.homeLines) < 1:
return gcodeText
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization( repository )
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def parseInitialization( self, repository ):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('home')
return
elif firstWord == '(<perimeterWidth>':
self.absolutePerimeterWidth = abs(float(splitLine[1]))
elif firstWord == '(<travelFeedRatePerSecond>':
self.travelFeedRateMinute = 60.0 * float(splitLine[1])
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
"Parse a gcode line and add it to the bevel gcode."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'G1':
self.addHomeTravel(splitLine)
self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
elif firstWord == '(<layer>':
self.layerCount.printProgressIncrement('home')
if len(self.homeLines) > 0:
self.shouldHome = True
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
self.distanceFeedRate.addLine(line)
def main():
"Display the home dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
| makerbot/ReplicatorG | skein_engines/skeinforge-47/skeinforge_application/skeinforge_plugins/craft_plugins/home.py | Python | gpl-2.0 | 8,040 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import logging.handlers
# Custom debug levels
logging.MSG = logging.INFO - 2
logging.TRACEDEBUG = 7
logging.HEAVYDEBUG = 5
logging.addLevelName(logging.MSG, 'MSG')
logging.addLevelName(logging.TRACEDEBUG, 'TRACE')
logging.addLevelName(logging.HEAVYDEBUG, 'HEAVY')
"""
Below derived from:
https://mail.python.org/pipermail/tutor/2007-August/056243.html
"""
logging.NOTICE = logging.INFO + 5
logging.addLevelName(logging.NOTICE, 'NOTICE')
# define a new logger function for notice
# this is exactly like existing info, critical, debug...etc
def _Logger_notice(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'NOTICE'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.notice("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(logging.NOTICE):
self._log(logging.NOTICE, msg, args, **kwargs)
logging.Logger.notice = _Logger_notice
# define a new root level notice function
# this is exactly like existing info, critical, debug...etc
def _root_notice(msg, *args, **kwargs):
"""
Log a message with severity 'NOTICE' on the root logger.
"""
if len(logging.root.handlers) == 0:
logging.basicConfig()
logging.root.notice(msg, *args, **kwargs)
# make the notice root level function known
logging.notice = _root_notice
# add NOTICE to the priority map of all the levels
logging.handlers.SysLogHandler.priority_map['NOTICE'] = 'notice'
from time import strptime
# strptime thread safety hack-around - http://bugs.python.org/issue7980
strptime("2012", "%Y")
# short names for pure numeric log-level ("Level 25" could be truncated by short formats):
def _init():
for i in range(50):
if logging.getLevelName(i).startswith('Level'):
logging.addLevelName(i, '#%02d-Lev.' % i)
_init()
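# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of how the custom levels defined above could be used by a
# caller; the logger name below is an assumption.
def _example_notice_usage():
	"""Emit one message at each of the custom levels added above."""
	logger = logging.getLogger('fail2ban.example')
	logger.notice('ban list reloaded')              # NOTICE == INFO + 5
	logger.log(logging.MSG, 'low-priority info')    # MSG == INFO - 2
	logger.log(logging.TRACEDEBUG, 'trace detail')  # TRACEDEBUG == 7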
| nawawi/fail2ban | fail2ban/__init__.py | Python | gpl-2.0 | 2,770 |
# -*- coding: utf-8 -*-
#
# python-problem documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 4 12:03:58 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../problem/.libs')) # _pyabrt
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'abrt-python'
copyright = u'2012, Richard Marko'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'abrt-pythondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'abrt-python.tex', u'abrt-python Documentation',
u'Richard Marko', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'abrt-python', u'abrt-python Documentation',
[u'Richard Marko'], 5)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'abrt-python', u'abrt-python Documentation',
u'Richard Marko', 'abrt-python', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
def setup(app):
app.connect('autodoc-process-signature', process_signature)
def process_signature(app, what, name, obj, options, signature,
return_annotation):
    if what not in ('function',):
return
new_params = list()
for param in (x.strip() for x in signature[1:-1].split(',')):
if '__' not in param:
new_params.append(param)
return ('(%s)' % ', '.join(new_params), return_annotation)
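# --- Illustrative example (not part of the original configuration) -----------
# process_signature() above drops any parameter whose name contains '__' from
# the autodoc signature. A worked example (argument values are hypothetical):
#
#   process_signature(None, 'function', 'problem.list', None, {},
#                     '(auth, __cache__, limit)', None)
#   # returns ('(auth, limit)', None)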
| mhabrnal/abrt | src/python-problem/doc/conf.py | Python | gpl-2.0 | 8,344 |
'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class Shell(object):
"""
"""
def __init__(self, controller, target_list,
threshold=.01, pen_down=False):
"""
        controller Control instance: the controller to use
        target_list list: sequence of targets to move through
        threshold float: distance at which a target counts as reached
        pen_down boolean: True if the end-effector is drawing
"""
self.controller = controller
self.pen_down = pen_down
self.target_list = target_list
self.threshold = threshold
self.not_at_start = True
self.target_index = 0
self.set_target()
def control(self, arm):
"""Move to a series of targets.
"""
if self.controller.check_distance(arm) < self.threshold:
if self.target_index < len(self.target_list)-1:
self.target_index += 1
self.set_target()
self.controller.apply_noise = True
self.not_at_start = not self.not_at_start
self.pen_down = not self.pen_down
self.u = self.controller.control(arm)
return self.u
def set_target(self):
"""
Set the current target for the controller.
"""
if self.target_index == len(self.target_list)-1:
target = [1, 2]
else:
target = self.target_list[self.target_index]
if target[0] != target[0]: # if it's NANs
self.target_index += 1
self.set_target()
else:
self.controller.target = target
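# --- Illustrative usage sketch (not part of the original file) ---------------
# Shell wraps an existing controller and walks it through the target list,
# toggling pen_down each time a target is reached. The Control and Arm objects
# below are assumptions standing in for the controller and arm implementations
# found elsewhere in this repository.
#
#   controller = Control()                    # hypothetical controller instance
#   shell = Shell(controller,
#                 target_list=[[.5, .5], [1., 1.], [1., 2.]],
#                 threshold=.01)
#   for t in range(1000):                     # hypothetical simulation loop
#       u = shell.control(arm)                # control signal for this timestep
#       arm.apply_torque(u, dt=1e-2)          # hypothetical arm interface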
| russellgeoff/blog | Control/Controllers/target_list.py | Python | gpl-3.0 | 2,121 |
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 Erik Svensson <[email protected]>
# Licensed under the MIT license.
import sys
import datetime
from core.transmissionrpc.constants import PRIORITY, RATIO_LIMIT, IDLE_LIMIT
from core.transmissionrpc.utils import Field, format_timedelta
from six import integer_types, string_types, text_type, iteritems
def get_status_old(code):
"""Get the torrent status using old status codes"""
mapping = {
(1 << 0): 'check pending',
(1 << 1): 'checking',
(1 << 2): 'downloading',
(1 << 3): 'seeding',
(1 << 4): 'stopped',
}
return mapping[code]
def get_status_new(code):
"""Get the torrent status using new status codes"""
mapping = {
0: 'stopped',
1: 'check pending',
2: 'checking',
3: 'download pending',
4: 'downloading',
5: 'seed pending',
6: 'seeding',
}
return mapping[code]
class Torrent(object):
"""
Torrent is a class holding the data received from Transmission regarding a bittorrent transfer.
All fetched torrent fields are accessible through this class using attributes.
This class has a few convenience properties using the torrent data.
"""
def __init__(self, client, fields):
if 'id' not in fields:
raise ValueError('Torrent requires an id')
self._fields = {}
self._update_fields(fields)
self._incoming_pending = False
self._outgoing_pending = False
self._client = client
def _get_name_string(self, codec=None):
"""Get the name"""
if codec is None:
codec = sys.getdefaultencoding()
name = None
# try to find name
if 'name' in self._fields:
name = self._fields['name'].value
        # if name is unicode, try to encode it with the given codec
if isinstance(name, text_type):
try:
name = name.encode(codec)
except UnicodeError:
name = None
return name
def __repr__(self):
tid = self._fields['id'].value
name = self._get_name_string()
if isinstance(name, str):
return '<Torrent {0:d} \"{1}\">'.format(tid, name)
else:
return '<Torrent {0:d}>'.format(tid)
def __str__(self):
name = self._get_name_string()
if isinstance(name, str):
return 'Torrent \"{0}\"'.format(name)
else:
return 'Torrent'
def __copy__(self):
return Torrent(self._client, self._fields)
def __getattr__(self, name):
try:
return self._fields[name].value
except KeyError:
raise AttributeError('No attribute {0}'.format(name))
def _rpc_version(self):
"""Get the Transmission RPC API version."""
if self._client:
return self._client.rpc_version
return 2
def _dirty_fields(self):
"""Enumerate changed fields"""
outgoing_keys = ['bandwidthPriority', 'downloadLimit', 'downloadLimited', 'peer_limit', 'queuePosition',
'seedIdleLimit', 'seedIdleMode', 'seedRatioLimit', 'seedRatioMode', 'uploadLimit',
'uploadLimited']
fields = []
for key in outgoing_keys:
if key in self._fields and self._fields[key].dirty:
fields.append(key)
return fields
def _push(self):
"""Push changed fields to the server"""
dirty = self._dirty_fields()
args = {}
for key in dirty:
args[key] = self._fields[key].value
self._fields[key] = self._fields[key]._replace(dirty=False)
if len(args) > 0:
self._client.change_torrent(self.id, **args)
def _update_fields(self, other):
"""
Update the torrent data from a Transmission JSON-RPC arguments dictionary
"""
if isinstance(other, dict):
for key, value in iteritems(other):
self._fields[key.replace('-', '_')] = Field(value, False)
elif isinstance(other, Torrent):
for key in list(other._fields.keys()):
self._fields[key] = Field(other._fields[key].value, False)
else:
raise ValueError('Cannot update with supplied data')
self._incoming_pending = False
def _status(self):
"""Get the torrent status"""
code = self._fields['status'].value
if self._rpc_version() >= 14:
return get_status_new(code)
else:
return get_status_old(code)
def files(self):
"""
Get list of files for this torrent.
This function returns a dictionary with file information for each file.
        The file information has the following fields:
::
{
<file id>: {
'name': <file name>,
'size': <file size in bytes>,
'completed': <bytes completed>,
'priority': <priority ('high'|'normal'|'low')>,
'selected': <selected for download>
}
...
}
"""
result = {}
if 'files' in self._fields:
files = self._fields['files'].value
indices = range(len(files))
priorities = self._fields['priorities'].value
wanted = self._fields['wanted'].value
for item in zip(indices, files, priorities, wanted):
selected = True if item[3] else False
priority = PRIORITY[item[2]]
result[item[0]] = {
'selected': selected,
'priority': priority,
'size': item[1]['length'],
'name': item[1]['name'],
'completed': item[1]['bytesCompleted']}
return result
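    # Illustrative usage sketch (not part of the original class): iterating the
    # structure documented above; ``client`` is an assumed transmissionrpc
    # Client instance.
    #
    #   torrent = client.get_torrent(1)
    #   for file_id, info in torrent.files().items():
    #       print(file_id, info['name'], info['completed'], '/', info['size'])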
@property
def status(self):
"""
Returns the torrent status. Is either one of 'check pending', 'checking',
        'downloading', 'seeding' or 'stopped'. The first two are related to
verification.
"""
return self._status()
@property
def progress(self):
"""Get the download progress in percent."""
try:
size = self._fields['sizeWhenDone'].value
left = self._fields['leftUntilDone'].value
return 100.0 * (size - left) / float(size)
except ZeroDivisionError:
return 0.0
@property
def ratio(self):
"""Get the upload/download ratio."""
return float(self._fields['uploadRatio'].value)
@property
def eta(self):
"""Get the "eta" as datetime.timedelta."""
eta = self._fields['eta'].value
if eta >= 0:
return datetime.timedelta(seconds=eta)
else:
raise ValueError('eta not valid')
@property
def date_active(self):
"""Get the attribute "activityDate" as datetime.datetime."""
return datetime.datetime.fromtimestamp(self._fields['activityDate'].value)
@property
def date_added(self):
"""Get the attribute "addedDate" as datetime.datetime."""
return datetime.datetime.fromtimestamp(self._fields['addedDate'].value)
@property
def date_started(self):
"""Get the attribute "startDate" as datetime.datetime."""
return datetime.datetime.fromtimestamp(self._fields['startDate'].value)
@property
def date_done(self):
"""Get the attribute "doneDate" as datetime.datetime."""
return datetime.datetime.fromtimestamp(self._fields['doneDate'].value)
def format_eta(self):
"""
Returns the attribute *eta* formatted as a string.
* If eta is -1 the result is 'not available'
* If eta is -2 the result is 'unknown'
* Otherwise eta is formatted as <days> <hours>:<minutes>:<seconds>.
"""
eta = self._fields['eta'].value
if eta == -1:
return 'not available'
elif eta == -2:
return 'unknown'
else:
return format_timedelta(self.eta)
def _get_download_limit(self):
"""
Get the download limit.
Can be a number or None.
"""
if self._fields['downloadLimited'].value:
return self._fields['downloadLimit'].value
else:
return None
def _set_download_limit(self, limit):
"""
        Set the download limit.
Can be a number, 'session' or None.
"""
if isinstance(limit, integer_types):
self._fields['downloadLimited'] = Field(True, True)
self._fields['downloadLimit'] = Field(limit, True)
self._push()
elif limit is None:
self._fields['downloadLimited'] = Field(False, True)
self._push()
else:
raise ValueError("Not a valid limit")
download_limit = property(_get_download_limit, _set_download_limit, None,
"Download limit in Kbps or None. This is a mutator.")
def _get_peer_limit(self):
"""
Get the peer limit.
"""
return self._fields['peer_limit'].value
def _set_peer_limit(self, limit):
"""
Set the peer limit.
"""
if isinstance(limit, integer_types):
self._fields['peer_limit'] = Field(limit, True)
self._push()
else:
raise ValueError("Not a valid limit")
peer_limit = property(_get_peer_limit, _set_peer_limit, None, "Peer limit. This is a mutator.")
def _get_priority(self):
"""
Get the priority as string.
Can be one of 'low', 'normal', 'high'.
"""
return PRIORITY[self._fields['bandwidthPriority'].value]
def _set_priority(self, priority):
"""
Set the priority as string.
Can be one of 'low', 'normal', 'high'.
"""
if isinstance(priority, string_types):
self._fields['bandwidthPriority'] = Field(PRIORITY[priority], True)
self._push()
priority = property(_get_priority, _set_priority, None
, "Bandwidth priority as string. Can be one of 'low', 'normal', 'high'. This is a mutator.")
def _get_seed_idle_limit(self):
"""
Get the seed idle limit in minutes.
"""
return self._fields['seedIdleLimit'].value
def _set_seed_idle_limit(self, limit):
"""
Set the seed idle limit in minutes.
"""
if isinstance(limit, integer_types):
self._fields['seedIdleLimit'] = Field(limit, True)
self._push()
else:
raise ValueError("Not a valid limit")
seed_idle_limit = property(_get_seed_idle_limit, _set_seed_idle_limit, None
, "Torrent seed idle limit in minutes. Also see seed_idle_mode. This is a mutator.")
def _get_seed_idle_mode(self):
"""
Get the seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
"""
return IDLE_LIMIT[self._fields['seedIdleMode'].value]
def _set_seed_idle_mode(self, mode):
"""
Set the seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
"""
if isinstance(mode, str):
self._fields['seedIdleMode'] = Field(IDLE_LIMIT[mode], True)
self._push()
else:
raise ValueError("Not a valid limit")
seed_idle_mode = property(_get_seed_idle_mode, _set_seed_idle_mode, None,
"""
Seed idle mode as string. Can be one of 'global', 'single' or 'unlimited'.
* global, use session seed idle limit.
* single, use torrent seed idle limit. See seed_idle_limit.
* unlimited, no seed idle limit.
This is a mutator.
"""
)
def _get_seed_ratio_limit(self):
"""
Get the seed ratio limit as float.
"""
return float(self._fields['seedRatioLimit'].value)
def _set_seed_ratio_limit(self, limit):
"""
Set the seed ratio limit as float.
"""
if isinstance(limit, (integer_types, float)) and limit >= 0.0:
self._fields['seedRatioLimit'] = Field(float(limit), True)
self._push()
else:
raise ValueError("Not a valid limit")
seed_ratio_limit = property(_get_seed_ratio_limit, _set_seed_ratio_limit, None
, "Torrent seed ratio limit as float. Also see seed_ratio_mode. This is a mutator.")
def _get_seed_ratio_mode(self):
"""
Get the seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
"""
return RATIO_LIMIT[self._fields['seedRatioMode'].value]
def _set_seed_ratio_mode(self, mode):
"""
Set the seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
"""
if isinstance(mode, str):
self._fields['seedRatioMode'] = Field(RATIO_LIMIT[mode], True)
self._push()
else:
raise ValueError("Not a valid limit")
seed_ratio_mode = property(_get_seed_ratio_mode, _set_seed_ratio_mode, None,
"""
Seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
* global, use session seed ratio limit.
* single, use torrent seed ratio limit. See seed_ratio_limit.
* unlimited, no seed ratio limit.
This is a mutator.
"""
)
def _get_upload_limit(self):
"""
Get the upload limit.
Can be a number or None.
"""
if self._fields['uploadLimited'].value:
return self._fields['uploadLimit'].value
else:
return None
def _set_upload_limit(self, limit):
"""
Set the upload limit.
Can be a number, 'session' or None.
"""
if isinstance(limit, integer_types):
self._fields['uploadLimited'] = Field(True, True)
self._fields['uploadLimit'] = Field(limit, True)
self._push()
elif limit is None:
self._fields['uploadLimited'] = Field(False, True)
self._push()
else:
raise ValueError("Not a valid limit")
upload_limit = property(_get_upload_limit, _set_upload_limit, None,
"Upload limit in Kbps or None. This is a mutator.")
def _get_queue_position(self):
"""Get the queue position for this torrent."""
if self._rpc_version() >= 14:
return self._fields['queuePosition'].value
else:
return 0
def _set_queue_position(self, position):
"""Set the queue position for this torrent."""
if self._rpc_version() >= 14:
if isinstance(position, integer_types):
self._fields['queuePosition'] = Field(position, True)
self._push()
else:
raise ValueError("Not a valid position")
else:
pass
queue_position = property(_get_queue_position, _set_queue_position, None, "Queue position")
def update(self, timeout=None):
"""Update the torrent information."""
self._push()
torrent = self._client.get_torrent(self.id, timeout=timeout)
self._update_fields(torrent)
def start(self, bypass_queue=False, timeout=None):
"""
Start the torrent.
"""
self._incoming_pending = True
self._client.start_torrent(self.id, bypass_queue=bypass_queue, timeout=timeout)
def stop(self, timeout=None):
"""Stop the torrent."""
self._incoming_pending = True
self._client.stop_torrent(self.id, timeout=timeout)
def move_data(self, location, timeout=None):
"""Move torrent data to location."""
self._incoming_pending = True
self._client.move_torrent_data(self.id, location, timeout=timeout)
def locate_data(self, location, timeout=None):
"""Locate torrent data at location."""
self._incoming_pending = True
self._client.locate_torrent_data(self.id, location, timeout=timeout)
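
# Illustrative usage sketch (not part of the library): the mutator properties
# above push changes straight to the Transmission daemon as soon as they are
# assigned. This assumes a daemon reachable on localhost; the Client import
# path and constructor arguments may differ when the package is vendored.
if __name__ == '__main__':
    import transmissionrpc
    client = transmissionrpc.Client('localhost', port=9091)
    torrent = client.get_torrents()[0]
    torrent.upload_limit = 100          # cap uploads, or None to remove the cap
    torrent.seed_ratio_mode = 'single'  # use this torrent's own ratio limit
    torrent.seed_ratio_limit = 2.0      # stop seeding at ratio 2.0
    print(torrent.queue_position)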
| bbsan2k/nzbToMedia | core/transmissionrpc/torrent.py | Python | gpl-3.0 | 16,349 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import time
import numpy
from nupic.bindings.math import GetNTAReal
from nupic.research.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from nupic.research.monitor_mixin.temporal_memory_monitor_mixin import (
TemporalMemoryMonitorMixin)
from sensorimotor.fast_general_temporal_memory import (
FastGeneralTemporalMemory as GeneralTemporalMemory)
# Uncomment the line below to use GeneralTemporalMemory
# from sensorimotor.general_temporal_memory import GeneralTemporalMemory
from sensorimotor.temporal_pooler import TemporalPooler
# Uncomment the line below to use SpatialTemporalPooler
# from sensorimotor.spatial_temporal_pooler import SpatialTemporalPooler as TemporalPooler
from sensorimotor.temporal_pooler_monitor_mixin import (
TemporalPoolerMonitorMixin)
class MonitoredGeneralTemporalMemory(TemporalMemoryMonitorMixin,
GeneralTemporalMemory): pass
class MonitoredTemporalPooler(TemporalPoolerMonitorMixin, TemporalPooler): pass
"""
Experiment runner class for running networks with layer 4 and layer 3. The
client is responsible for setting up universes, agents, and worlds. This
class just sets up and runs the HTM learning algorithms.
"""
realDType = GetNTAReal()
class SensorimotorExperimentRunner(object):
DEFAULT_TM_PARAMS = {
# These should be decent for most experiments, shouldn't need to override
# these too often. Might want to increase cellsPerColumn for capacity
# experiments.
"cellsPerColumn": 8,
"initialPermanence": 0.5,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
# We will force client to override these
"columnDimensions": "Sorry",
"minThreshold": "Sorry",
"maxNewSynapseCount": "Sorry",
"activationThreshold": "Sorry",
}
DEFAULT_TP_PARAMS = {
# Need to check these parameters and find stable values that will be
# consistent across most experiments.
"synPermInactiveDec": 0, # TODO: Check we can use class default here.
"synPermActiveInc": 0.001, # TODO: Check we can use class default here.
"synPredictedInc": 0.5, # TODO: Why so high??
"potentialPct": 0.9, # TODO: need to check impact of this for pooling
"initConnectedPct": 0.5, # TODO: need to check impact of this for pooling
"poolingThreshUnpredicted": 0.0,
# We will force client to override these
"numActiveColumnsPerInhArea": "Sorry",
}
def __init__(self, tmOverrides=None, tpOverrides=None, seed=42):
# Initialize Layer 4 temporal memory
params = dict(self.DEFAULT_TM_PARAMS)
params.update(tmOverrides or {})
params["seed"] = seed
self._checkParams(params)
self.tm = MonitoredGeneralTemporalMemory(mmName="TM", **params)
# Initialize Layer 3 temporal pooler
params = dict(self.DEFAULT_TP_PARAMS)
params["inputDimensions"] = [self.tm.numberOfCells()]
params["potentialRadius"] = self.tm.numberOfCells()
params["seed"] = seed
params.update(tpOverrides or {})
self._checkParams(params)
self.tp = MonitoredTemporalPooler(mmName="TP", **params)
def _checkParams(self, params):
for k,v in params.iteritems():
if v == "Sorry":
raise RuntimeError("Param "+k+" must be specified")
def feedTransition(self, sensorPattern, motorPattern, sensorimotorPattern,
tmLearn=True, tpLearn=None, sequenceLabel=None):
if sensorPattern is None:
self.tm.reset()
self.tp.reset()
else:
# Feed the TM
self.tm.compute(sensorPattern,
activeExternalCells=motorPattern,
formInternalConnections=True,
learn=tmLearn,
sequenceLabel=sequenceLabel)
# If requested, feed the TP
if tpLearn is not None:
tpInputVector, burstingColumns, correctlyPredictedCells = (
self.formatInputForTP())
activeArray = numpy.zeros(self.tp.getNumColumns())
self.tp.compute(tpInputVector,
tpLearn,
activeArray,
burstingColumns,
correctlyPredictedCells,
sequenceLabel=sequenceLabel)
def feedLayers(self, sequences, tmLearn=True, tpLearn=None, verbosity=0,
showProgressInterval=None):
"""
Feed the given sequences to the HTM algorithms.
    @param tmLearn:   (bool)      Whether the temporal memory should learn
@param tpLearn: (None,bool) Either None, False, or True. If None,
temporal pooler will be skipped.
@param showProgressInterval: (int) Prints progress every N iterations,
where N is the value of this param
"""
(sensorSequence,
motorSequence,
sensorimotorSequence,
sequenceLabels) = sequences
currentTime = time.time()
for i in xrange(len(sensorSequence)):
sensorPattern = sensorSequence[i]
motorPattern = motorSequence[i]
sensorimotorPattern = sensorimotorSequence[i]
sequenceLabel = sequenceLabels[i]
self.feedTransition(sensorPattern, motorPattern, sensorimotorPattern,
tmLearn=tmLearn, tpLearn=tpLearn,
sequenceLabel=sequenceLabel)
if (showProgressInterval is not None and
i > 0 and
i % showProgressInterval == 0):
print ("Fed {0} / {1} elements of the sequence "
"in {2:0.2f} seconds.".format(
i, len(sensorSequence), time.time() - currentTime))
currentTime = time.time()
if verbosity >= 2:
# Print default TM traces
traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.tm.mmGetTraceResets())
if tpLearn is not None:
# Print default TP traces
traces = self.tp.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.tp.mmGetTraceResets())
print
@staticmethod
def generateSequences(length, agents, numSequences=1, verbosity=0):
"""
@param length (int) Length of each sequence to generate, one for
each agent
@param agents (AbstractAgent) Agents acting in their worlds
@return (tuple) (sensor sequence, motor sequence, sensorimotor sequence,
sequence labels)
"""
sensorSequence = []
motorSequence = []
sensorimotorSequence = []
sequenceLabels = []
for _ in xrange(numSequences):
for agent in agents:
s,m,sm = agent.generateSensorimotorSequence(length, verbosity=verbosity)
sensorSequence += s
motorSequence += m
sensorimotorSequence += sm
sequenceLabels += [agent.world.toString()] * length
sensorSequence.append(None)
motorSequence.append(None)
sensorimotorSequence.append(None)
sequenceLabels.append(None)
return sensorSequence, motorSequence, sensorimotorSequence, sequenceLabels
def formatInputForTP(self):
"""
Given an instance of the TM, format the information we need to send to the
TP.
"""
# all currently active cells in layer 4
tpInputVector = numpy.zeros(
self.tm.numberOfCells()).astype(realDType)
tpInputVector[list(self.tm.activeCellsIndices())] = 1
# bursting columns in layer 4
burstingColumns = numpy.zeros(
self.tm.numberOfColumns()).astype(realDType)
burstingColumns[list(self.tm.unpredictedActiveColumns)] = 1
# correctly predicted cells in layer 4
correctlyPredictedCells = numpy.zeros(
self.tm.numberOfCells()).astype(realDType)
correctlyPredictedCells[list(self.tm.predictedActiveCellsIndices())] = 1
return tpInputVector, burstingColumns, correctlyPredictedCells
def formatRow(self, x, formatString = "%d", rowSize = 700):
"""
Utility routine for pretty printing large vectors
"""
s = ''
for c,v in enumerate(x):
if c > 0 and c % 7 == 0:
s += ' '
if c > 0 and c % rowSize == 0:
s += '\n'
s += formatString % v
s += ' '
return s
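
# Illustrative sketch (not part of the original module): constructing a runner
# with the parameters the class forces clients to provide, then feeding it
# sequences. Agents and worlds come from elsewhere in the sensorimotor package
# and are only hinted at in the comments below.
if __name__ == "__main__":
  runner = SensorimotorExperimentRunner(
    tmOverrides={"columnDimensions": [1024],
                 "minThreshold": 35,
                 "maxNewSynapseCount": 40,
                 "activationThreshold": 35},
    tpOverrides={"numActiveColumnsPerInhArea": 20})
  # agents = [...]  # hypothetical agents acting in their worlds
  # sequences = runner.generateSequences(100, agents)
  # runner.feedLayers(sequences, tmLearn=True, tpLearn=True, verbosity=1)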
| pford68/nupic.research | sensorimotor/sensorimotor/sensorimotor_experiment_runner.py | Python | gpl-3.0 | 9,403 |
"""
Tests for Blocks api.py
"""
from django.test.client import RequestFactory
from course_blocks.tests.helpers import EnableTransformerRegistryMixin
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory
from ..api import get_blocks
class TestGetBlocks(EnableTransformerRegistryMixin, SharedModuleStoreTestCase):
"""
Tests for the get_blocks function
"""
@classmethod
def setUpClass(cls):
super(TestGetBlocks, cls).setUpClass()
cls.course = SampleCourseFactory.create()
# hide the html block
cls.html_block = cls.store.get_item(cls.course.id.make_usage_key('html', 'html_x1a_1'))
cls.html_block.visible_to_staff_only = True
cls.store.update_item(cls.html_block, ModuleStoreEnum.UserID.test)
def setUp(self):
super(TestGetBlocks, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_basic(self):
blocks = get_blocks(self.request, self.course.location, self.user)
self.assertEquals(blocks['root'], unicode(self.course.location))
# subtract for (1) the orphaned course About block and (2) the hidden Html block
self.assertEquals(len(blocks['blocks']), len(self.store.get_items(self.course.id)) - 2)
self.assertNotIn(unicode(self.html_block.location), blocks['blocks'])
def test_no_user(self):
blocks = get_blocks(self.request, self.course.location)
self.assertIn(unicode(self.html_block.location), blocks['blocks'])
def test_access_before_api_transformer_order(self):
"""
Tests the order of transformers: access checks are made before the api
transformer is applied.
"""
blocks = get_blocks(self.request, self.course.location, self.user, nav_depth=5, requested_fields=['nav_depth'])
vertical_block = self.store.get_item(self.course.id.make_usage_key('vertical', 'vertical_x1a'))
problem_block = self.store.get_item(self.course.id.make_usage_key('problem', 'problem_x1a_1'))
vertical_descendants = blocks['blocks'][unicode(vertical_block.location)]['descendants']
self.assertIn(unicode(problem_block.location), vertical_descendants)
self.assertNotIn(unicode(self.html_block.location), vertical_descendants)
| antoviaque/edx-platform | lms/djangoapps/course_api/blocks/tests/test_api.py | Python | agpl-3.0 | 2,533 |
"""This *was* the parser for the current HTML format on parl.gc.ca.
But now we have XML. See parl_document.py.
This module is organized like so:
__init__.py - utility functions, simple parse interface
common.py - infrastructure used in the parsers, i.e. regexes
current.py - parser for the Hansard format used from 2006 to the present
old.py - (fairly crufty) parser for the format used from 1994 to 2006
"""
from parliament.imports.hans_old.common import *
import logging
logger = logging.getLogger(__name__)
class HansardParser2009(HansardParser):
def __init__(self, hansard, html):
for regex in STARTUP_RE_2009:
html = re.sub(regex[0], regex[1], html)
super(HansardParser2009, self).__init__(hansard, html)
for x in self.soup.findAll('a', 'deleteMe'):
x.findParent('div').extract()
def process_related_link(self, tag, string, current_politician=None):
#print "PROCESSING RELATED for %s" % string
resid = re.search(r'ResourceID=(\d+)', tag['href'])
restype = re.search(r'ResourceType=(Document|Affiliation)', tag['href'])
        if not (resid and restype):
return string
resid, restype = int(resid.group(1)), restype.group(1)
if restype == 'Document':
try:
bill = Bill.objects.get_by_legisinfo_id(resid)
except Bill.DoesNotExist:
match = re.search(r'\b[CS]\-\d+[A-E]?\b', string)
if not match:
logger.error("Invalid bill link %s" % string)
return string
bill = Bill.objects.create_temporary_bill(legisinfo_id=resid,
number=match.group(0), session=self.hansard.session)
except Exception, e:
print "Related bill search failed for callback %s" % resid
print repr(e)
return string
return u'<bill id="%d" name="%s">%s</bill>' % (bill.id, escape(bill.name), string)
elif restype == 'Affiliation':
try:
pol = Politician.objects.getByParlID(resid)
except Politician.DoesNotExist:
print "Related politician search failed for callback %s" % resid
if getattr(settings, 'PARLIAMENT_LABEL_FAILED_CALLBACK', False):
# FIXME migrate away from internalxref?
InternalXref.objects.get_or_create(schema='pol_parlid', int_value=resid, target_id=-1)
return string
if pol == current_politician:
return string # When someone mentions her riding, don't link back to her
return u'<pol id="%d" name="%s">%s</pol>' % (pol.id, escape(pol.name), string)
def get_text(self, cursor):
text = u''
for string in cursor.findAll(text=parsetools.r_hasText):
if string.parent.name == 'a' and string.parent['class'] == 'WebOption':
text += self.process_related_link(string.parent, string, self.t['politician'])
else:
text += unicode(string)
return text
def parse(self):
super(HansardParser2009, self).parse()
# Initialize variables
t = ParseTracker()
self.t = t
member_refs = {}
# Get the date
c = self.soup.find(text='OFFICIAL REPORT (HANSARD)').findNext('h2')
self.date = datetime.datetime.strptime(c.string.strip(), "%A, %B %d, %Y").date()
self.hansard.date = self.date
self.hansard.save()
c = c.findNext(text=r_housemet)
match = re.search(r_housemet, c.string)
t['timestamp'] = self.houseTime(match.group(1), match.group(2))
t.setNext('timestamp', t['timestamp'])
# Move the pointer to the start
c = c.next
# And start the big loop
while c is not None:
# It's a string
if not hasattr(c, 'name'):
pass
# Heading
elif c.name == 'h2':
c = c.next
if not parsetools.isString(c): raise ParseException("Expecting string right after h2")
t.setNext('heading', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip())))
# Topic
elif c.name == 'h3':
top = c.find(text=r_letter)
#if not parsetools.isString(c):
# check if it's an empty header
# if c.parent.find(text=r_letter):
# raise ParseException("Expecting string right after h3")
if top is not None:
c = top
t['topic_set'] = True
t.setNext('topic', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip())))
elif c.name == 'h4':
if c.string == 'APPENDIX':
self.saveStatement(t)
print "Appendix reached -- we're done!"
break
# Timestamp
elif c.name == 'a' and c.has_key('name') and c['name'].startswith('T'):
match = re.search(r'^T(\d\d)(\d\d)$', c['name'])
if match:
t.setNext('timestamp', parsetools.time_to_datetime(
hour=int(match.group(1)),
minute=int(match.group(2)),
date=self.date))
else:
raise ParseException("Couldn't match time %s" % c.attrs['name'])
elif c.name == 'b' and c.string:
# Something to do with written answers
match = r_honorific.search(c.string)
if match:
# It's a politician asking or answering a question
# We don't get a proper link here, so this has to be a name match
polname = re.sub(r'\(.+\)', '', match.group(2)).strip()
self.saveStatement(t)
t['member_title'] = c.string.strip()
t['written_question'] = True
try:
pol = Politician.objects.get_by_name(polname, session=self.hansard.session)
t['politician'] = pol
t['member'] = ElectedMember.objects.get_by_pol(politician=pol, date=self.date)
except Politician.DoesNotExist:
print "WARNING: No name match for %s" % polname
except Politician.MultipleObjectsReturned:
print "WARNING: Multiple pols for %s" % polname
else:
if not c.string.startswith('Question'):
print "WARNING: Unexplained boldness: %s" % c.string
# div -- the biggie
elif c.name == 'div':
origdiv = c
if c.find('b'):
# We think it's a new speaker
# Save the current buffer
self.saveStatement(t)
c = c.find('b')
if c.find('a'):
# There's a link...
c = c.find('a')
match = re.search(r'ResourceType=Affiliation&ResourceID=(\d+)', c['href'])
if match and c.find(text=r_letter):
parlwebid = int(match.group(1))
# We have the parl ID. First, see if we already know this ID.
pol = Politician.objects.getByParlID(parlwebid, lookOnline=False)
if pol is None:
# We don't. Try to do a quick name match first (if flags say so)
if not GET_PARLID_ONLINE:
who = c.next.string
match = re.search(r_honorific, who)
if match:
polname = re.sub(r'\(.+\)', '', match.group(2)).strip()
try:
#print "Looking for %s..." % polname,
pol = Politician.objects.get_by_name(polname, session=self.hansard.session)
#print "found."
except Politician.DoesNotExist:
pass
except Politician.MultipleObjectsReturned:
pass
if pol is None:
# Still no match. Go online...
try:
pol = Politician.objects.getByParlID(parlwebid, session=self.hansard.session)
except Politician.DoesNotExist:
print "WARNING: Couldn't find politician for ID %d" % parlwebid
if pol is not None:
t['member'] = ElectedMember.objects.get_by_pol(politician=pol, date=self.date)
t['politician'] = pol
c = c.next
if not parsetools.isString(c): raise Exception("Expecting string in b for member name")
t['member_title'] = c.strip()
#print c
if t['member_title'].endswith(':'): # Remove colon in e.g. Some hon. members:
t['member_title'] = t['member_title'][:-1]
# Sometimes we don't get a link for short statements -- see if we can identify by backreference
if t['member']:
member_refs[t['member_title']] = t['member']
# Also save a backref w/o position/riding
member_refs[re.sub(r'\s*\(.+\)\s*', '', t['member_title'])] = t['member']
elif t['member_title'] in member_refs:
t['member'] = member_refs[t['member_title']]
t['politician'] = t['member'].politician
c.findParent('b').extract() # We've got the title, now get the rest of the paragraph
c = origdiv
t.addText(self.get_text(c))
else:
# There should be text in here
if c.find('div'):
if c.find('div', 'Footer'):
# We're done!
self.saveStatement(t)
print "Footer div reached -- done!"
break
raise Exception("I wasn't expecting another div in here")
txt = self.get_text(c).strip()
if r_proceedings.search(txt):
self.saveStatement(t)
self.saveProceedingsStatement(txt, t)
else:
t.addText(txt, blockquote=bool(c.find('small')))
else:
#print c.name
if c.name == 'b':
print "B: ",
print c
#if c.name == 'p':
# print "P: ",
# print c
c = c.next
return self.statements
| twhyte/openparliament | parliament/imports/hans_old/current.py | Python | agpl-3.0 | 11,722 |
import re
import uuid
from xmodule.assetstore.assetmgr import AssetManager
XASSET_LOCATION_TAG = 'c4x'
XASSET_SRCREF_PREFIX = 'xasset:'
XASSET_THUMBNAIL_TAIL_NAME = '.jpg'
STREAM_DATA_CHUNK_SIZE = 1024
import os
import logging
import StringIO
from urlparse import urlparse, urlunparse, parse_qsl
from urllib import urlencode
from opaque_keys.edx.locator import AssetLocator
from opaque_keys.edx.keys import CourseKey, AssetKey
from opaque_keys import InvalidKeyError
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.exceptions import NotFoundError
from PIL import Image
class StaticContent(object):
def __init__(self, loc, name, content_type, data, last_modified_at=None, thumbnail_location=None, import_path=None,
length=None, locked=False):
self.location = loc
self.name = name # a display string which can be edited, and thus not part of the location which needs to be fixed
self.content_type = content_type
self._data = data
self.length = length
self.last_modified_at = last_modified_at
self.thumbnail_location = thumbnail_location
# optional information about where this file was imported from. This is needed to support import/export
# cycles
self.import_path = import_path
self.locked = locked
@property
def is_thumbnail(self):
return self.location.category == 'thumbnail'
@staticmethod
def generate_thumbnail_name(original_name, dimensions=None):
"""
- original_name: Name of the asset (typically its location.name)
- dimensions: `None` or a tuple of (width, height) in pixels
"""
name_root, ext = os.path.splitext(original_name)
if not ext == XASSET_THUMBNAIL_TAIL_NAME:
name_root = name_root + ext.replace(u'.', u'-')
if dimensions:
width, height = dimensions # pylint: disable=unpacking-non-sequence
name_root += "-{}x{}".format(width, height)
return u"{name_root}{extension}".format(
name_root=name_root,
extension=XASSET_THUMBNAIL_TAIL_NAME,
)
@staticmethod
def compute_location(course_key, path, revision=None, is_thumbnail=False):
"""
Constructs a location object for static content.
- course_key: the course that this asset belongs to
- path: is the name of the static asset
- revision: is the object's revision information
- is_thumbnail: is whether or not we want the thumbnail version of this
asset
"""
path = path.replace('/', '_')
return course_key.make_asset_key(
'asset' if not is_thumbnail else 'thumbnail',
AssetLocator.clean_keeping_underscores(path)
).for_branch(None)
def get_id(self):
return self.location
@property
def data(self):
return self._data
ASSET_URL_RE = re.compile(r"""
/?c4x/
(?P<org>[^/]+)/
(?P<course>[^/]+)/
(?P<category>[^/]+)/
(?P<name>[^/]+)
""", re.VERBOSE | re.IGNORECASE)
@staticmethod
def is_c4x_path(path_string):
"""
Returns a boolean if a path is believed to be a c4x link based on the leading element
"""
return StaticContent.ASSET_URL_RE.match(path_string) is not None
@staticmethod
def get_static_path_from_location(location):
"""
This utility static method will take a location identifier and create a 'durable' /static/.. URL representation of it.
This link is 'durable' as it can maintain integrity across cloning of courseware across course-ids, e.g. reruns of
courses.
In the LMS/CMS, we have runtime link-rewriting, so at render time, this /static/... format will get translated into
the actual /c4x/... path which the client needs to reference static content
"""
if location is not None:
return u"/static/{name}".format(name=location.name)
else:
return None
@staticmethod
def get_base_url_path_for_course_assets(course_key):
if course_key is None:
return None
assert isinstance(course_key, CourseKey)
placeholder_id = uuid.uuid4().hex
# create a dummy asset location with a fake but unique name. strip off the name, and return it
url_path = StaticContent.serialize_asset_key_with_slash(
course_key.make_asset_key('asset', placeholder_id).for_branch(None)
)
return url_path.replace(placeholder_id, '')
@staticmethod
def get_location_from_path(path):
"""
Generate an AssetKey for the given path (old c4x/org/course/asset/name syntax)
"""
try:
return AssetKey.from_string(path)
except InvalidKeyError:
# TODO - re-address this once LMS-11198 is tackled.
if path.startswith('/'):
# try stripping off the leading slash and try again
return AssetKey.from_string(path[1:])
@staticmethod
def get_asset_key_from_path(course_key, path):
"""
Parses a path, extracting an asset key or creating one.
Args:
course_key: key to the course which owns this asset
path: the path to said content
Returns:
AssetKey: the asset key that represents the path
"""
# Clean up the path, removing any static prefix and any leading slash.
if path.startswith('/static/'):
path = path[len('/static/'):]
path = path.lstrip('/')
try:
return AssetKey.from_string(path)
except InvalidKeyError:
# If we couldn't parse the path, just let compute_location figure it out.
# It's most likely a path like /image.png or something.
return StaticContent.compute_location(course_key, path)
@staticmethod
def get_canonicalized_asset_path(course_key, path, base_url):
"""
Returns a fully-qualified path to a piece of static content.
If a static asset CDN is configured, this path will include it.
Otherwise, the path will simply be relative.
Args:
course_key: key to the course which owns this asset
            path: the path to said content
            base_url: url prefix (e.g. a CDN root) prepended when the asset can be served publicly
Returns:
string: fully-qualified path to asset
"""
# Break down the input path.
_, _, relative_path, params, query_string, fragment = urlparse(path)
# Convert our path to an asset key if it isn't one already.
asset_key = StaticContent.get_asset_key_from_path(course_key, relative_path)
# Check the status of the asset to see if this can be served via CDN aka publicly.
serve_from_cdn = False
try:
content = AssetManager.find(asset_key, as_stream=True)
is_locked = getattr(content, "locked", True)
serve_from_cdn = not is_locked
except (ItemNotFoundError, NotFoundError):
# If we can't find the item, just treat it as if it's locked.
serve_from_cdn = False
# Update any query parameter values that have asset paths in them. This is for assets that
# require their own after-the-fact values, like a Flash file that needs the path of a config
# file passed to it e.g. /static/visualization.swf?configFile=/static/visualization.xml
query_params = parse_qsl(query_string)
updated_query_params = []
for query_name, query_value in query_params:
if query_value.startswith("/static/"):
new_query_value = StaticContent.get_canonicalized_asset_path(course_key, query_value, base_url)
updated_query_params.append((query_name, new_query_value))
else:
updated_query_params.append((query_name, query_value))
serialized_asset_key = StaticContent.serialize_asset_key_with_slash(asset_key)
base_url = base_url if serve_from_cdn else ''
return urlunparse((None, base_url, serialized_asset_key, params, urlencode(updated_query_params), fragment))
def stream_data(self):
yield self._data
@staticmethod
def serialize_asset_key_with_slash(asset_key):
"""
Legacy code expects the serialized asset key to start w/ a slash; so, do that in one place
:param asset_key:
"""
url = unicode(asset_key)
if not url.startswith('/'):
url = '/' + url # TODO - re-address this once LMS-11198 is tackled.
return url
class StaticContentStream(StaticContent):
def __init__(self, loc, name, content_type, stream, last_modified_at=None, thumbnail_location=None, import_path=None,
length=None, locked=False):
super(StaticContentStream, self).__init__(loc, name, content_type, None, last_modified_at=last_modified_at,
thumbnail_location=thumbnail_location, import_path=import_path,
length=length, locked=locked)
self._stream = stream
def stream_data(self):
while True:
chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
if len(chunk) == 0:
break
yield chunk
def stream_data_in_range(self, first_byte, last_byte):
"""
Stream the data between first_byte and last_byte (included)
"""
self._stream.seek(first_byte)
position = first_byte
while True:
if last_byte < position + STREAM_DATA_CHUNK_SIZE - 1:
chunk = self._stream.read(last_byte - position + 1)
yield chunk
break
chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
position += STREAM_DATA_CHUNK_SIZE
yield chunk
def close(self):
self._stream.close()
def copy_to_in_mem(self):
self._stream.seek(0)
content = StaticContent(self.location, self.name, self.content_type, self._stream.read(),
last_modified_at=self.last_modified_at, thumbnail_location=self.thumbnail_location,
import_path=self.import_path, length=self.length, locked=self.locked)
return content
class ContentStore(object):
'''
Abstraction for all ContentStore providers (e.g. MongoDB)
'''
def save(self, content):
raise NotImplementedError
def find(self, filename):
raise NotImplementedError
def get_all_content_for_course(self, course_key, start=0, maxresults=-1, sort=None, filter_params=None):
'''
Returns a list of static assets for a course, followed by the total number of assets.
By default all assets are returned, but start and maxresults can be provided to limit the query.
The return format is a list of asset data dictionaries.
The asset data dictionaries have the following keys:
asset_key (:class:`opaque_keys.edx.AssetKey`): The key of the asset
displayname: The human-readable name of the asset
        uploadDate (datetime.datetime): The date and time that the file was uploaded
contentType: The mimetype string of the asset
md5: An md5 hash of the asset content
'''
raise NotImplementedError
def delete_all_course_assets(self, course_key):
"""
Delete all of the assets which use this course_key as an identifier
:param course_key:
"""
raise NotImplementedError
def copy_all_course_assets(self, source_course_key, dest_course_key):
"""
Copy all the course assets from source_course_key to dest_course_key
"""
raise NotImplementedError
def generate_thumbnail(self, content, tempfile_path=None, dimensions=None):
"""Create a thumbnail for a given image.
Returns a tuple of (StaticContent, AssetKey)
`content` is the StaticContent representing the image you want to make a
thumbnail out of.
`tempfile_path` is a string path to the location of a file to read from
in order to grab the image data, instead of relying on `content.data`
`dimensions` is an optional param that represents (width, height) in
pixels. It defaults to None.
"""
thumbnail_content = None
# use a naming convention to associate originals with the thumbnail
thumbnail_name = StaticContent.generate_thumbnail_name(
content.location.name, dimensions=dimensions
)
thumbnail_file_location = StaticContent.compute_location(
content.location.course_key, thumbnail_name, is_thumbnail=True
)
# if we're uploading an image, then let's generate a thumbnail so that we can
# serve it up when needed without having to rescale on the fly
if content.content_type is not None and content.content_type.split('/')[0] == 'image':
try:
# use PIL to do the thumbnail generation (http://www.pythonware.com/products/pil/)
# My understanding is that PIL will maintain aspect ratios while restricting
# the max-height/width to be whatever you pass in as 'size'
# @todo: move the thumbnail size to a configuration setting?!?
if tempfile_path is None:
im = Image.open(StringIO.StringIO(content.data))
else:
im = Image.open(tempfile_path)
                # I've seen some exceptions from the PIL library when trying to save paletted
# PNG files to JPEG. Per the google-universe, they suggest converting to RGB first.
im = im.convert('RGB')
if not dimensions:
dimensions = (128, 128)
im.thumbnail(dimensions, Image.ANTIALIAS)
thumbnail_file = StringIO.StringIO()
im.save(thumbnail_file, 'JPEG')
thumbnail_file.seek(0)
# store this thumbnail as any other piece of content
thumbnail_content = StaticContent(thumbnail_file_location, thumbnail_name,
'image/jpeg', thumbnail_file)
self.save(thumbnail_content)
except Exception, e:
# log and continue as thumbnails are generally considered as optional
logging.exception(u"Failed to generate thumbnail for {0}. Exception: {1}".format(content.location, str(e)))
return thumbnail_content, thumbnail_file_location
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
"""
pass
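
# Illustrative sketch (not part of edx-platform): how a few of the static
# helpers above fit together, plus ranged streaming over an in-memory stream.
# The course id is hypothetical; CourseKey and StringIO are already imported
# at the top of this module.
if __name__ == '__main__':
    example_course = CourseKey.from_string('course-v1:OrgX+Demo+2015')
    example_key = StaticContent.compute_location(example_course, 'images/logo.png')
    print(StaticContent.serialize_asset_key_with_slash(example_key))
    print(StaticContent.get_static_path_from_location(example_key))  # slashes flattened to underscores
    print(StaticContent.generate_thumbnail_name('logo.png', dimensions=(128, 128)))
    demo_stream = StaticContentStream(None, 'demo.txt', 'text/plain',
                                      StringIO.StringIO('0123456789'), length=10)
    print(''.join(demo_stream.stream_data_in_range(2, 5)))  # -> '2345'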
| MakeHer/edx-platform | common/lib/xmodule/xmodule/contentstore/content.py | Python | agpl-3.0 | 14,964 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2015 Julien Veyssier, Laurent Bachelier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import random
import urllib
from urlparse import urlsplit
from weboob.deprecated.browser import Browser, BrowserHTTPNotFound
from .pages.index import IndexPage
from .pages.torrents import TorrentPage, TorrentsPage
__all__ = ['PiratebayBrowser']
class PiratebayBrowser(Browser):
ENCODING = 'utf-8'
DOMAINS = ['thepiratebay.vg',
'thepiratebay.la',
'thepiratebay.mn',
'thepiratebay.gd']
def __init__(self, url, *args, **kwargs):
url = url or 'https://%s/' % random.choice(self.DOMAINS)
url_parsed = urlsplit(url)
self.PROTOCOL = url_parsed.scheme
self.DOMAIN = url_parsed.netloc
self.PAGES = {
'%s://%s/' % (self.PROTOCOL, self.DOMAIN): IndexPage,
'%s://%s/search/.*/0/7/0' % (self.PROTOCOL, self.DOMAIN): TorrentsPage,
'%s://%s/torrent/.*' % (self.PROTOCOL, self.DOMAIN): TorrentPage
}
Browser.__init__(self, *args, **kwargs)
def iter_torrents(self, pattern):
self.location('%s://%s/search/%s/0/7/0' % (self.PROTOCOL,
self.DOMAIN,
urllib.quote_plus(pattern.encode('utf-8'))))
assert self.is_on_page(TorrentsPage)
return self.page.iter_torrents()
def get_torrent(self, _id):
try:
self.location('%s://%s/torrent/%s/' % (self.PROTOCOL,
self.DOMAIN,
_id))
except BrowserHTTPNotFound:
return
if self.is_on_page(TorrentPage):
return self.page.get_torrent(_id)
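
# Illustrative sketch (not part of the module): weboob normally builds browsers
# through its backend machinery, but the class can be exercised directly.
# Torrent objects are assumed to expose a 'name' field as defined by weboob's
# torrent capability.
if __name__ == '__main__':
    browser = PiratebayBrowser(None)  # None -> pick a random mirror from DOMAINS
    for torrent in browser.iter_torrents(u'ubuntu'):
        print torrent.name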
| sputnick-dev/weboob | modules/piratebay/browser.py | Python | agpl-3.0 | 2,463 |
"""This module implement decorators for wrapping data sources so as to
simplify their construction and attribution of properties.
"""
import functools
def data_source_generator(name=None, **properties):
"""Decorator for applying to a simple data source which directly
returns an iterable/generator with the metrics for each sample. The
function the decorator is applied to must take no arguments.
"""
def _decorator(func):
@functools.wraps(func)
def _properties(settings):
def _factory(environ):
return func
d = dict(properties)
d['name'] = name
d['factory'] = _factory
return d
return _properties
return _decorator
def data_source_factory(name=None, **properties):
"""Decorator for applying to a data source defined as a factory. The
decorator can be applied to a class or a function. The class
constructor or function must accept arguments of 'settings', being
configuration settings for the data source, and 'environ' being
information about the context in which the data source is being
used. The resulting object must be a callable which directly returns
an iterable/generator with the metrics for each sample.
"""
def _decorator(func):
@functools.wraps(func)
def _properties(settings):
def _factory(environ):
return func(settings, environ)
d = dict(properties)
d['name'] = name
d['factory'] = _factory
return d
return _properties
return _decorator
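
# Illustrative sketch (not part of the agent): the two decorators in use. The
# metric names, the 'scale' setting and the (name, value) sample format below
# are assumptions made for demonstration only.
if __name__ == '__main__':
    import random

    @data_source_generator(name='Dice Roller')
    def dice_metrics():
        yield ('Custom/Dice/Roll', random.randint(1, 6))

    @data_source_factory(name='Scaled Counter')
    def scaled_counter(settings, environ):
        state = {'count': 0}
        scale = int(settings.get('scale', 1)) if settings else 1
        def sample():
            state['count'] += 1
            yield ('Custom/Counter/Value', state['count'] * scale)
        return sample

    properties = scaled_counter({'scale': 10})   # returns the properties dict
    source = properties['factory']({})           # environ dict, unused here
    print(list(source()))                        # [('Custom/Counter/Value', 10)]
    print(list(source()))                        # [('Custom/Counter/Value', 20)]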
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/samplers/decorators.py | Python | agpl-3.0 | 1,626 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2010-2017, GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# ([email protected]).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
| gem/oq-hazardlib | openquake/hmtk/strain/regionalisation/__init__.py | Python | agpl-3.0 | 1,925 |
class Extension(object):
"""
Base class for creating extensions.
Args:
kwargs[dict]: All key, value pairings are stored as "configuration" options, see getConfigs.
"""
def __init__(self, **kwargs):
#: Configure options
self._configs = kwargs
self._configs.setdefault('headings', ['section', 'subsection', 'subsubsection', 'textbf', 'underline', 'emph'])
def getConfigs(self):
"""
Return the dictionary of configure options.
"""
return self._configs
def extend(self, translator):
"""
Elements should be added to the storage of the Translator instance within this function.
Args:
translator[Translator]: The object to be used for converting the html.
"""
pass
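
# Illustrative sketch (not part of MooseDocs): a minimal subclass. What exactly
# gets registered on the translator depends on the html2latex elements
# available, so the body of extend() is left as a hypothetical comment.
if __name__ == '__main__':
    class ExampleExtension(Extension):
        def __init__(self, **kwargs):
            kwargs.setdefault('headings', ['section', 'subsection'])
            super(ExampleExtension, self).__init__(**kwargs)

        def extend(self, translator):
            # e.g. register custom elements with the translator here
            pass

    ext = ExampleExtension(custom_option=True)
    print ext.getConfigs()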
| vityurkiv/Ox | python/MooseDocs/html2latex/Extension.py | Python | lgpl-2.1 | 735 |
#!/usr/bin/python
import math
import Sofa
def tostr(L):
return str(L).replace('[', '').replace("]", '').replace(",", ' ')
def transform(T,p):
    """Apply the affine transform T = [[row-major 3x3 matrix], [translation]] to point p."""
return [T[0][0]*p[0]+T[0][1]*p[1]+T[0][2]*p[2]+T[1][0],T[0][3]*p[0]+T[0][4]*p[1]+T[0][5]*p[2]+T[1][1],T[0][6]*p[0]+T[0][7]*p[1]+T[0][8]*p[2]+T[1][2]]
def transformF(T,F):
    """Apply the linear (matrix) part of T to a row-major 3x3 deformation gradient F."""
return [T[0][0]*F[0]+T[0][1]*F[3]+T[0][2]*F[6],T[0][0]*F[1]+T[0][1]*F[4]+T[0][2]*F[7],T[0][0]*F[2]+T[0][1]*F[5]+T[0][2]*F[8],T[0][3]*F[0]+T[0][4]*F[3]+T[0][5]*F[6],T[0][3]*F[1]+T[0][4]*F[4]+T[0][5]*F[7],T[0][3]*F[2]+T[0][4]*F[5]+T[0][5]*F[8],T[0][6]*F[0]+T[0][7]*F[3]+T[0][8]*F[6],T[0][6]*F[1]+T[0][7]*F[4]+T[0][8]*F[7],T[0][6]*F[2]+T[0][7]*F[5]+T[0][8]*F[8]]
def compare(p1,p2):
res = 0
for i,P1 in enumerate(p1):
for j,item in enumerate(P1):
res = res+ (item-p2[i][j])*(item-p2[i][j])
return res
ERRORTOL = 1e-5
T = [[2,0,0,0,2,0,0,0,2],[0,0,0]]
#T = [[0.8,1.2,0.3,0,1.9,0.45,0.5,2.8,0.2],[5,2,8]]
samples= [[0.5,0.5,0.5], [0.23,0.5,0.8], [0,0.12,0], [0.8,0,0.58]]
# scene creation method
def createScene(rootNode):
rootNode.createObject('RequiredPlugin', pluginName="Flexible")
rootNode.createObject('VisualStyle', displayFlags="showBehaviorModels")
restpos = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
pos = [transform(T,item) for item in restpos]
###########################################################
simNode = rootNode.createChild('Hexa_barycentric')
simNode.createObject('MeshTopology', name="mesh", position=tostr(restpos), hexahedra="0 1 3 2 4 5 7 6")
simNode.createObject('MechanicalObject', template="Vec3d", name="parent", rest_position="@mesh.position",position=tostr(pos) )
simNode.createObject('BarycentricShapeFunction', position="@parent.rest_position", nbRef="8")
childNode = simNode.createChild('childP')
childNode.createObject('MechanicalObject', template="Vec3d", name="child", position=tostr(samples) , showObject="1")
childNode.createObject('LinearMapping', template="Vec3d,Vec3d")
childNode = simNode.createChild('childF')
childNode.createObject('GaussPointContainer', position=tostr(samples))
childNode.createObject('MechanicalObject', template="F331", name="child")
childNode.createObject('LinearMapping', template="Vec3d,F331", showDeformationGradientScale="1")
childNode = simNode.createChild('Visu')
childNode.createObject('VisualModel', color="8e-1 8e-1 1 1e-1")
childNode.createObject('IdentityMapping')
childNode = simNode.createChild('Visu2')
childNode.createObject('VisualStyle', displayFlags="showWireframe")
childNode.createObject('VisualModel', color="8e-1 8e-1 1 1")
childNode.createObject('IdentityMapping')
simNode.createObject('PythonScriptController',filename="FEM.py", classname="Controller")
###########################################################
simNode = rootNode.createChild('Tetra_barycentric')
simNode.createObject('MeshTopology', name="mesh", position=tostr(restpos), tetrahedra="0 5 1 7 0 1 2 7 1 2 7 3 7 2 0 6 7 6 0 5 6 5 4 0")
simNode.createObject('MechanicalObject', template="Vec3d", name="parent", rest_position="@mesh.position",position=tostr(pos) )
simNode.createObject('BarycentricShapeFunction', position="@parent.rest_position", nbRef="4")
childNode = simNode.createChild('childP')
childNode.createObject('MechanicalObject', template="Vec3d", name="child", position=tostr(samples) , showObject="1")
childNode.createObject('LinearMapping', template="Vec3d,Vec3d")
childNode = simNode.createChild('childF')
childNode.createObject('GaussPointContainer', position=tostr(samples))
childNode.createObject('MechanicalObject', template="F331", name="child")
childNode.createObject('LinearMapping', template="Vec3d,F331")
simNode.createObject('PythonScriptController',filename="FEM.py", classname="Controller")
###########################################################
simNode = rootNode.createChild('Hexa_shepard')
simNode.createObject('MeshTopology', name="mesh", position=tostr(restpos), hexahedra="0 1 3 2 4 5 7 6")
simNode.createObject('MechanicalObject', template="Vec3d", name="parent", rest_position="@mesh.position",position=tostr(pos) )
simNode.createObject('ShepardShapeFunction', position="@parent.rest_position", power="2")
childNode = simNode.createChild('childP')
childNode.createObject('MechanicalObject', template="Vec3d", name="child", position=tostr(samples) , showObject="1")
childNode.createObject('LinearMapping', template="Vec3d,Vec3d")
childNode = simNode.createChild('childF')
childNode.createObject('GaussPointContainer', position=tostr(samples))
childNode.createObject('MechanicalObject', template="F331", name="child")
childNode.createObject('LinearMapping', template="Vec3d,F331")
simNode.createObject('PythonScriptController',filename="FEM.py", classname="Controller")
###########################################################
rootNode.animate=1
return rootNode
class Controller(Sofa.PythonScriptController):
def createGraph(self,node):
self.node=node
self.done=0
return 0
def onEndAnimationStep(self,dt):
if self.done==0:
print "TEST "+self.node.name+":"
# test points
restpos = self.node.getObject('childP/child').findData('rest_position').value
refpos = [transform(T,item) for item in restpos]
pos = self.node.getObject('childP/child').findData('position').value
error = compare(refpos,pos)
if error>ERRORTOL :
print "\t"+"\033[91m"+"[FAILED]"+"\033[0m"+" error on P= "+str(error)
else :
print "\t"+"\033[92m"+"[OK]"+"\033[0m"+" error on P= "+str(error)
# test defo gradients
restpos = [1,0,0,0,1,0,0,0,1]
pos = self.node.getObject('childF/child').findData('position').value
refpos = [transformF(T,restpos) for item in pos]
error = compare(refpos,pos)
if error>ERRORTOL :
print "\t"+"\033[91m"+"[FAILED]"+"\033[0m"+" error on F= "+str(error)
else :
print "\t"+"\033[92m"+"[OK]"+"\033[0m"+" error on F= "+str(error)
self.done=1
return 0
| FabienPean/sofa | applications/plugins/Flexible/examples/patch_test/FEM.py | Python | lgpl-2.1 | 6,005 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Serf(SConsPackage):
"""Apache Serf - a high performance C-based HTTP client library
built upon the Apache Portable Runtime (APR) library"""
homepage = 'https://serf.apache.org/'
url = 'https://archive.apache.org/dist/serf/serf-1.3.9.tar.bz2'
maintainers = ['cosmicexplorer']
version('1.3.9', sha256='549c2d21c577a8a9c0450facb5cca809f26591f048e466552240947bdf7a87cc')
version('1.3.8', sha256='e0500be065dbbce490449837bb2ab624e46d64fc0b090474d9acaa87c82b2590')
variant('debug', default=False,
description='Enable debugging info and strict compile warnings')
depends_on('apr')
depends_on('apr-util')
depends_on('openssl')
depends_on('python+pythoncmd', type='build')
depends_on('[email protected]:', type='build')
depends_on('uuid')
depends_on('zlib')
patch('py3syntax.patch')
patch('py3-hashbang.patch')
def build_args(self, spec, prefix):
args = {
'PREFIX': prefix,
'APR': spec['apr'].prefix,
'APU': spec['apr-util'].prefix,
'OPENSSL': spec['openssl'].prefix,
'ZLIB': spec['zlib'].prefix,
'DEBUG': 'yes' if '+debug' in spec else 'no',
}
# SCons doesn't pass Spack environment variables to the
# execution environment. Therefore, we can't use Spack's compiler
# wrappers. Use the actual compilers. SCons seems to RPATH things
# on its own anyway.
args['CC'] = self.compiler.cc
# Old versions of serf ignore the ZLIB variable on non-Windows platforms.
# Also, there is no UUID variable to specify its installation location.
# Pass explicit link flags for both.
library_dirs = []
include_dirs = []
for dep in spec.dependencies(deptype='link'):
query = self.spec[dep.name]
library_dirs.extend(query.libs.directories)
include_dirs.extend(query.headers.directories)
rpath = self.compiler.cc_rpath_arg
args['LINKFLAGS'] = '-L' + ' -L'.join(library_dirs)
args['LINKFLAGS'] += ' ' + rpath + (' ' + rpath).join(library_dirs)
args['CPPFLAGS'] = '-I' + ' -I'.join(include_dirs)
return [key + '=' + value for key, value in args.items()]
def build_test(self):
# FIXME: Several test failures:
#
# There were 14 failures:
# 1) test_ssl_trust_rootca
# 2) test_ssl_certificate_chain_with_anchor
# 3) test_ssl_certificate_chain_all_from_server
# 4) test_ssl_no_servercert_callback_allok
# 5) test_ssl_large_response
# 6) test_ssl_large_request
# 7) test_ssl_client_certificate
# 8) test_ssl_future_server_cert
# 9) test_setup_ssltunnel
# 10) test_ssltunnel_basic_auth
# 11) test_ssltunnel_basic_auth_server_has_keepalive_off
# 12) test_ssltunnel_basic_auth_proxy_has_keepalive_off
# 13) test_ssltunnel_basic_auth_proxy_close_conn_on_200resp
# 14) test_ssltunnel_digest_auth
#
# These seem to be related to:
# https://groups.google.com/forum/#!topic/serf-dev/YEFTTdF1Qwc
scons('check')
| LLNL/spack | var/spack/repos/builtin/packages/serf/package.py | Python | lgpl-2.1 | 3,412 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Qnnpack(CMakePackage):
"""QNNPACK (Quantized Neural Networks PACKage) is a mobile-optimized
library for low-precision high-performance neural network inference.
QNNPACK provides implementation of common neural network operators on
quantized 8-bit tensors."""
homepage = "https://github.com/pytorch/QNNPACK"
git = "https://github.com/pytorch/QNNPACK.git"
version('master', branch='master')
version('2019-08-28', commit='7d2a4e9931a82adc3814275b6219a03e24e36b4c') # [email protected]:1.9
version('2018-12-27', commit='6c62fddc6d15602be27e9e4cbb9e985151d2fa82') # [email protected]
version('2018-12-04', commit='ef05e87cef6b8e719989ce875b5e1c9fdb304c05') # [email protected]:1.1
depends_on('[email protected]:', type='build')
depends_on('ninja', type='build')
depends_on('python', type='build')
resource(
name='cpuinfo',
git='https://github.com/Maratyszcza/cpuinfo.git',
destination='deps',
placement='cpuinfo'
)
resource(
name='fp16',
git='https://github.com/Maratyszcza/FP16.git',
destination='deps',
placement='fp16'
)
resource(
name='fxdiv',
git='https://github.com/Maratyszcza/FXdiv.git',
destination='deps',
placement='fxdiv'
)
resource(
name='googlebenchmark',
url='https://github.com/google/benchmark/archive/v1.4.1.zip',
sha256='61ae07eb5d4a0b02753419eb17a82b7d322786bb36ab62bd3df331a4d47c00a7',
destination='deps',
placement='googlebenchmark',
)
resource(
name='googletest',
url='https://github.com/google/googletest/archive/release-1.8.0.zip',
sha256='f3ed3b58511efd272eb074a3a6d6fb79d7c2e6a0e374323d1e6bcbcc1ef141bf',
destination='deps',
placement='googletest',
)
resource(
name='psimd',
git='https://github.com/Maratyszcza/psimd.git',
destination='deps',
placement='psimd'
)
resource(
name='pthreadpool',
git='https://github.com/Maratyszcza/pthreadpool.git',
destination='deps',
placement='pthreadpool'
)
generator = 'Ninja'
def cmake_args(self):
return [
self.define('CPUINFO_SOURCE_DIR',
join_path(self.stage.source_path, 'deps', 'cpuinfo')),
self.define('FP16_SOURCE_DIR',
join_path(self.stage.source_path, 'deps', 'fp16')),
self.define('FXDIV_SOURCE_DIR',
join_path(self.stage.source_path, 'deps', 'fxdiv')),
self.define('PSIMD_SOURCE_DIR',
join_path(self.stage.source_path, 'deps', 'psimd')),
self.define('PTHREADPOOL_SOURCE_DIR',
join_path(self.stage.source_path, 'deps', 'pthreadpool')),
self.define('GOOGLEBENCHMARK_SOURCE_DIR',
join_path(self.stage.source_path, 'deps', 'googlebenchmark')),
self.define('GOOGLETEST_SOURCE_DIR',
join_path(self.stage.source_path, 'deps', 'googletest')),
]
| LLNL/spack | var/spack/repos/builtin/packages/qnnpack/package.py | Python | lgpl-2.1 | 3,348 |
import unittest
from ctypes import *
from struct import calcsize
import _testcapi
class SubclassesTest(unittest.TestCase):
def test_subclass(self):
class X(Structure):
_fields_ = [("a", c_int)]
class Y(X):
_fields_ = [("b", c_int)]
class Z(X):
pass
self.assertEqual(sizeof(X), sizeof(c_int))
self.assertEqual(sizeof(Y), sizeof(c_int)*2)
self.assertEqual(sizeof(Z), sizeof(c_int))
self.assertEqual(X._fields_, [("a", c_int)])
self.assertEqual(Y._fields_, [("b", c_int)])
self.assertEqual(Z._fields_, [("a", c_int)])
def test_subclass_delayed(self):
class X(Structure):
pass
self.assertEqual(sizeof(X), 0)
X._fields_ = [("a", c_int)]
class Y(X):
pass
self.assertEqual(sizeof(Y), sizeof(X))
Y._fields_ = [("b", c_int)]
class Z(X):
pass
self.assertEqual(sizeof(X), sizeof(c_int))
self.assertEqual(sizeof(Y), sizeof(c_int)*2)
self.assertEqual(sizeof(Z), sizeof(c_int))
self.assertEqual(X._fields_, [("a", c_int)])
self.assertEqual(Y._fields_, [("b", c_int)])
self.assertEqual(Z._fields_, [("a", c_int)])
class StructureTestCase(unittest.TestCase):
formats = {"c": c_char,
"b": c_byte,
"B": c_ubyte,
"h": c_short,
"H": c_ushort,
"i": c_int,
"I": c_uint,
"l": c_long,
"L": c_ulong,
"q": c_longlong,
"Q": c_ulonglong,
"f": c_float,
"d": c_double,
}
def test_simple_structs(self):
for code, tp in self.formats.items():
class X(Structure):
_fields_ = [("x", c_char),
("y", tp)]
self.assertEqual((sizeof(X), code),
(calcsize("c%c0%c" % (code, code)), code))
def test_unions(self):
for code, tp in self.formats.items():
class X(Union):
_fields_ = [("x", c_char),
("y", tp)]
self.assertEqual((sizeof(X), code),
(calcsize("%c" % (code)), code))
def test_struct_alignment(self):
class X(Structure):
_fields_ = [("x", c_char * 3)]
self.assertEqual(alignment(X), calcsize("s"))
self.assertEqual(sizeof(X), calcsize("3s"))
class Y(Structure):
_fields_ = [("x", c_char * 3),
("y", c_int)]
self.assertEqual(alignment(Y), calcsize("i"))
self.assertEqual(sizeof(Y), calcsize("3si"))
class SI(Structure):
_fields_ = [("a", X),
("b", Y)]
self.assertEqual(alignment(SI), max(alignment(Y), alignment(X)))
self.assertEqual(sizeof(SI), calcsize("3s0i 3si 0i"))
class IS(Structure):
_fields_ = [("b", Y),
("a", X)]
self.assertEqual(alignment(SI), max(alignment(X), alignment(Y)))
self.assertEqual(sizeof(IS), calcsize("3si 3s 0i"))
class XX(Structure):
_fields_ = [("a", X),
("b", X)]
self.assertEqual(alignment(XX), alignment(X))
self.assertEqual(sizeof(XX), calcsize("3s 3s 0s"))
    def test_empty(self):
# I had problems with these
#
        # Although these are pathological cases: Empty Structures!
class X(Structure):
_fields_ = []
class Y(Union):
_fields_ = []
# Is this really the correct alignment, or should it be 0?
self.assertTrue(alignment(X) == alignment(Y) == 1)
self.assertTrue(sizeof(X) == sizeof(Y) == 0)
class XX(Structure):
_fields_ = [("a", X),
("b", X)]
self.assertEqual(alignment(XX), 1)
self.assertEqual(sizeof(XX), 0)
def test_fields(self):
        # test the offset and size attributes of Structure/Union fields.
class X(Structure):
_fields_ = [("x", c_int),
("y", c_char)]
self.assertEqual(X.x.offset, 0)
self.assertEqual(X.x.size, sizeof(c_int))
self.assertEqual(X.y.offset, sizeof(c_int))
self.assertEqual(X.y.size, sizeof(c_char))
# readonly
self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92)
self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92)
class X(Union):
_fields_ = [("x", c_int),
("y", c_char)]
self.assertEqual(X.x.offset, 0)
self.assertEqual(X.x.size, sizeof(c_int))
self.assertEqual(X.y.offset, 0)
self.assertEqual(X.y.size, sizeof(c_char))
# readonly
self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92)
self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92)
# XXX Should we check nested data types also?
# offset is always relative to the class...
def test_packed(self):
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 1
self.assertEqual(sizeof(X), 9)
self.assertEqual(X.b.offset, 1)
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 2
self.assertEqual(sizeof(X), 10)
self.assertEqual(X.b.offset, 2)
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 4
self.assertEqual(sizeof(X), 12)
self.assertEqual(X.b.offset, 4)
import struct
longlong_size = struct.calcsize("q")
longlong_align = struct.calcsize("bq") - longlong_size
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 8
self.assertEqual(sizeof(X), longlong_align + longlong_size)
self.assertEqual(X.b.offset, min(8, longlong_align))
d = {"_fields_": [("a", "b"),
("b", "q")],
"_pack_": -1}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
# Issue 15989
d = {"_fields_": [("a", c_byte)],
"_pack_": _testcapi.INT_MAX + 1}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
d = {"_fields_": [("a", c_byte)],
"_pack_": _testcapi.UINT_MAX + 2}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
def test_initializers(self):
class Person(Structure):
_fields_ = [("name", c_char*6),
("age", c_int)]
self.assertRaises(TypeError, Person, 42)
self.assertRaises(ValueError, Person, b"asldkjaslkdjaslkdj")
self.assertRaises(TypeError, Person, "Name", "HI")
# short enough
self.assertEqual(Person(b"12345", 5).name, b"12345")
# exact fit
self.assertEqual(Person(b"123456", 5).name, b"123456")
# too long
self.assertRaises(ValueError, Person, b"1234567", 5)
def test_conflicting_initializers(self):
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
# conflicting positional and keyword args
self.assertRaises(TypeError, POINT, 2, 3, x=4)
self.assertRaises(TypeError, POINT, 2, 3, y=4)
# too many initializers
self.assertRaises(TypeError, POINT, 2, 3, 4)
def test_keyword_initializers(self):
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
pt = POINT(1, 2)
self.assertEqual((pt.x, pt.y), (1, 2))
pt = POINT(y=2, x=1)
self.assertEqual((pt.x, pt.y), (1, 2))
def test_invalid_field_types(self):
class POINT(Structure):
pass
self.assertRaises(TypeError, setattr, POINT, "_fields_", [("x", 1), ("y", 2)])
def test_invalid_name(self):
# field name must be string
def declare_with_name(name):
class S(Structure):
_fields_ = [(name, c_int)]
self.assertRaises(TypeError, declare_with_name, b"x")
def test_intarray_fields(self):
class SomeInts(Structure):
_fields_ = [("a", c_int * 4)]
# can use tuple to initialize array (but not list!)
self.assertEqual(SomeInts((1, 2)).a[:], [1, 2, 0, 0])
self.assertEqual(SomeInts((1, 2)).a[::], [1, 2, 0, 0])
self.assertEqual(SomeInts((1, 2)).a[::-1], [0, 0, 2, 1])
self.assertEqual(SomeInts((1, 2)).a[::2], [1, 0])
self.assertEqual(SomeInts((1, 2)).a[1:5:6], [2])
self.assertEqual(SomeInts((1, 2)).a[6:4:-1], [])
self.assertEqual(SomeInts((1, 2, 3, 4)).a[:], [1, 2, 3, 4])
self.assertEqual(SomeInts((1, 2, 3, 4)).a[::], [1, 2, 3, 4])
# too long
        # XXX Should this raise ValueError instead of RuntimeError?
self.assertRaises(RuntimeError, SomeInts, (1, 2, 3, 4, 5))
def test_nested_initializers(self):
# test initializing nested structures
class Phone(Structure):
_fields_ = [("areacode", c_char*6),
("number", c_char*12)]
class Person(Structure):
_fields_ = [("name", c_char * 12),
("phone", Phone),
("age", c_int)]
p = Person(b"Someone", (b"1234", b"5678"), 5)
self.assertEqual(p.name, b"Someone")
self.assertEqual(p.phone.areacode, b"1234")
self.assertEqual(p.phone.number, b"5678")
self.assertEqual(p.age, 5)
def test_structures_with_wchar(self):
try:
c_wchar
except NameError:
return # no unicode
class PersonW(Structure):
_fields_ = [("name", c_wchar * 12),
("age", c_int)]
p = PersonW("Someone \xe9")
self.assertEqual(p.name, "Someone \xe9")
self.assertEqual(PersonW("1234567890").name, "1234567890")
self.assertEqual(PersonW("12345678901").name, "12345678901")
# exact fit
self.assertEqual(PersonW("123456789012").name, "123456789012")
#too long
self.assertRaises(ValueError, PersonW, "1234567890123")
def test_init_errors(self):
class Phone(Structure):
_fields_ = [("areacode", c_char*6),
("number", c_char*12)]
class Person(Structure):
_fields_ = [("name", c_char * 12),
("phone", Phone),
("age", c_int)]
cls, msg = self.get_except(Person, b"Someone", (1, 2))
self.assertEqual(cls, RuntimeError)
self.assertEqual(msg,
"(Phone) <class 'TypeError'>: "
"expected string, int found")
cls, msg = self.get_except(Person, b"Someone", (b"a", b"b", b"c"))
self.assertEqual(cls, RuntimeError)
if issubclass(Exception, object):
self.assertEqual(msg,
"(Phone) <class 'TypeError'>: too many initializers")
else:
self.assertEqual(msg, "(Phone) TypeError: too many initializers")
def test_huge_field_name(self):
# issue12881: segfault with large structure field names
def create_class(length):
class S(Structure):
_fields_ = [('x' * length, c_int)]
for length in [10 ** i for i in range(0, 8)]:
try:
create_class(length)
except MemoryError:
# MemoryErrors are OK, we just don't want to segfault
pass
def get_except(self, func, *args):
try:
func(*args)
except Exception as detail:
return detail.__class__, str(detail)
## def test_subclass_creation(self):
## meta = type(Structure)
## # same as 'class X(Structure): pass'
## # fails, since we need either a _fields_ or a _abstract_ attribute
## cls, msg = self.get_except(meta, "X", (Structure,), {})
## self.assertEqual((cls, msg),
## (AttributeError, "class must define a '_fields_' attribute"))
def test_abstract_class(self):
class X(Structure):
_abstract_ = "something"
# try 'X()'
cls, msg = self.get_except(eval, "X()", locals())
self.assertEqual((cls, msg), (TypeError, "abstract class"))
def test_methods(self):
## class X(Structure):
## _fields_ = []
self.assertTrue("in_dll" in dir(type(Structure)))
self.assertTrue("from_address" in dir(type(Structure)))
self.assertTrue("in_dll" in dir(type(Structure)))
def test_positional_args(self):
# see also http://bugs.python.org/issue5042
class W(Structure):
_fields_ = [("a", c_int), ("b", c_int)]
class X(W):
_fields_ = [("c", c_int)]
class Y(X):
pass
class Z(Y):
_fields_ = [("d", c_int), ("e", c_int), ("f", c_int)]
z = Z(1, 2, 3, 4, 5, 6)
self.assertEqual((z.a, z.b, z.c, z.d, z.e, z.f),
(1, 2, 3, 4, 5, 6))
z = Z(1)
self.assertEqual((z.a, z.b, z.c, z.d, z.e, z.f),
(1, 0, 0, 0, 0, 0))
self.assertRaises(TypeError, lambda: Z(1, 2, 3, 4, 5, 6, 7))
class PointerMemberTestCase(unittest.TestCase):
def test(self):
# a Structure with a POINTER field
class S(Structure):
_fields_ = [("array", POINTER(c_int))]
s = S()
# We can assign arrays of the correct type
s.array = (c_int * 3)(1, 2, 3)
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [1, 2, 3])
# The following are bugs, but are included here because the unittests
# also describe the current behaviour.
#
# This fails with SystemError: bad arg to internal function
# or with IndexError (with a patch I have)
s.array[0] = 42
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [42, 2, 3])
s.array[0] = 1
## s.array[1] = 42
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [1, 2, 3])
def test_none_to_pointer_fields(self):
class S(Structure):
_fields_ = [("x", c_int),
("p", POINTER(c_int))]
s = S()
s.x = 12345678
s.p = None
self.assertEqual(s.x, 12345678)
class TestRecursiveStructure(unittest.TestCase):
def test_contains_itself(self):
class Recursive(Structure):
pass
try:
Recursive._fields_ = [("next", Recursive)]
except AttributeError as details:
self.assertTrue("Structure or union cannot contain itself" in
str(details))
else:
self.fail("Structure or union cannot contain itself")
def test_vice_versa(self):
class First(Structure):
pass
class Second(Structure):
pass
First._fields_ = [("second", Second)]
try:
Second._fields_ = [("first", First)]
except AttributeError as details:
self.assertTrue("_fields_ is final" in
str(details))
else:
self.fail("AttributeError not raised")
if __name__ == '__main__':
unittest.main()
| LaoZhongGu/kbengine | kbe/src/lib/python/Lib/ctypes/test/test_structures.py | Python | lgpl-3.0 | 15,782 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_stmt_simplify():
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
C = ib.pointer("float32", name="C")
n = te.size_var("n")
with ib.for_range(0, n, name="i") as i:
with ib.if_scope(i < 12):
A[i] = C[i]
body = tvm.tir.LetStmt(n, 10, ib.get())
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
body = tvm.tir.transform.Simplify()(mod)["main"].body
assert isinstance(body.body, tvm.tir.Store)
def test_thread_extent_simplify():
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
C = ib.pointer("float32", name="C")
n = te.size_var("n")
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
ib.scope_attr(tx, "thread_extent", n)
ib.scope_attr(tx, "thread_extent", n)
ib.scope_attr(ty, "thread_extent", 1)
with ib.if_scope(tx + ty < 12):
A[tx] = C[tx + ty]
body = tvm.tir.LetStmt(n, 10, ib.get())
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
body = tvm.tir.transform.Simplify()(mod)["main"].body
assert isinstance(body.body.body.body, tvm.tir.Store)
def test_if_likely():
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
C = ib.pointer("float32", name="C")
n = te.size_var("n")
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
ib.scope_attr(tx, "thread_extent", 32)
ib.scope_attr(ty, "thread_extent", 32)
with ib.if_scope(ib.likely(tx * 32 + ty < n)):
with ib.if_scope(ib.likely(tx * 32 + ty < n)):
A[tx] = C[tx * 32 + ty]
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
body = tvm.tir.transform.Simplify()(mod)["main"].body
assert isinstance(body.body.body, tvm.tir.IfThenElse)
assert not isinstance(body.body.body.then_case, tvm.tir.IfThenElse)
def test_basic_likely_elimination():
n = te.size_var("n")
X = te.placeholder(shape=(n,), name="x")
W = te.placeholder(shape=(n + 1,), dtype="int32", name="w")
def f(i):
start = W[i]
extent = W[i + 1] - W[i]
rv = te.reduce_axis((0, extent))
return te.sum(X[rv + start], axis=rv)
Y = te.compute(X.shape, f, name="y")
s = te.create_schedule([Y.op])
stmt = tvm.lower(s, [X, W, Y], simple_mode=True)
assert "if" not in str(stmt)
def test_complex_likely_elimination():
def cumsum(X):
"""
Y[i] = sum(X[:i])
"""
(m,) = X.shape
s_state = te.placeholder((m + 1,), dtype="int32", name="state")
s_init = te.compute((1,), lambda _: tvm.tir.const(0, "int32"))
s_update = te.compute((m + 1,), lambda l: s_state[l - 1] + X[l - 1])
return tvm.te.scan(s_init, s_update, s_state, inputs=[X], name="cumsum")
def sparse_lengths_sum(data, indices, lengths):
oshape = list(data.shape)
oshape[0] = lengths.shape[0]
length_offsets = cumsum(lengths)
def sls(n, d):
gg = te.reduce_axis((0, lengths[n]))
indices_idx = length_offsets[n] + gg
data_idx = indices[indices_idx]
data_val = data[data_idx, d]
return te.sum(data_val, axis=gg)
return te.compute(oshape, sls)
m, n, d, i, l = (
te.size_var("m"),
te.size_var("n"),
te.size_var("d"),
te.size_var("i"),
te.size_var("l"),
)
data_ph = te.placeholder((m, d * 32), name="data")
indices_ph = te.placeholder((i,), name="indices", dtype="int32")
lengths_ph = te.placeholder((n,), name="lengths", dtype="int32")
Y = sparse_lengths_sum(data_ph, indices_ph, lengths_ph)
s = te.create_schedule([Y.op])
(n, d) = s[Y].op.axis
(do, di) = s[Y].split(d, factor=32)
(gg,) = s[Y].op.reduce_axis
s[Y].reorder(n, do, gg, di)
s[Y].vectorize(di)
stmt = tvm.lower(s, [data_ph, indices_ph, lengths_ph, Y], simple_mode=True)
assert "if" not in str(stmt)
if __name__ == "__main__":
test_stmt_simplify()
test_thread_extent_simplify()
test_if_likely()
test_basic_likely_elimination()
test_complex_likely_elimination()
| dmlc/tvm | tests/python/unittest/test_tir_transform_simplify.py | Python | apache-2.0 | 5,007 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
module: iworkflow_license_pool
short_description: Manage license pools in iWorkflow.
description:
- Manage license pools in iWorkflow.
version_added: 2.4
options:
name:
description:
- Name of the license pool to create.
required: True
state:
description:
- Whether the license pool should exist, or not. A state of C(present)
will attempt to activate the license pool if C(accept_eula) is set
to C(yes).
required: False
default: present
choices:
- present
- absent
base_key:
description:
- Key that the license server uses to verify the functionality that
you are entitled to license. This option is required if you are
creating a new license.
required: False
default: None
accept_eula:
description:
- Specifies that you accept the EULA that is part of iWorkflow. Note
that this is required to activate the license pool. If this is not
specified, or it is set to C(no), then the pool will remain in a state
of limbo until you choose to accept the EULA. This option is required
when updating a license. It is also suggested that you provide it when
creating a license, but if you do not, the license will remain
inactive and you will have to run this module again with this option
set to C(yes) to activate it.
required: False
default: 'no'
choices:
- yes
- no
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 2.3.0
- iWorkflow >= 2.1.0
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create license pool
iworkflow_license_pool:
accept_eula: "yes"
name: "my-lic-pool"
base_key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX"
state: "present"
server: "iwf.mydomain.com"
password: "secret"
user: "admin"
validate_certs: "no"
delegate_to: localhost
'''
RETURN = '''
'''
import time
from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
F5ModuleError,
HAS_F5SDK,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'baseRegKey': 'base_key'
}
returnables = []
api_attributes = [
'baseRegKey', 'state'
]
updatables = []
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def name(self):
if self._values['name'] is None:
return None
name = str(self._values['name']).strip()
if name == '':
raise F5ModuleError(
"You must specify a name for this module"
)
return name
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def _pool_is_licensed(self):
if self.have.state == 'LICENSED':
return True
return False
def _pool_is_unlicensed_eula_unaccepted(self, current):
if current.state != 'LICENSED' and not self.want.accept_eula:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.changes.to_return())
result.update(dict(changed=changed))
return result
def exists(self):
collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
requests_params=dict(
params="$filter=name+eq+'{0}'".format(self.want.name)
)
)
if len(collection) == 1:
return True
elif len(collection) == 0:
return False
else:
raise F5ModuleError(
"Multiple license pools with the provided name were found!"
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def should_update(self):
if self._pool_is_licensed():
return False
        if self._pool_is_unlicensed_eula_unaccepted(self.have):
return False
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
        if self.client.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
requests_params=dict(
params="$filter=name+eq+'{0}'".format(self.want.name)
)
)
resource = collection.pop()
resource.modify(
state='RELICENSE',
method='AUTOMATIC'
)
return self._wait_for_license_pool_state_to_activate(resource)
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
if self.want.base_key is None:
raise F5ModuleError(
"You must specify a 'base_key' when creating a license pool"
)
self.create_on_device()
return True
def read_current_from_device(self):
collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
requests_params=dict(
params="$filter=name+eq+'{0}'".format(self.want.name)
)
)
resource = collection.pop()
result = resource.attrs
return Parameters(result)
def create_on_device(self):
resource = self.client.api.cm.shared.licensing.pools_s.pool.create(
name=self.want.name,
baseRegKey=self.want.base_key,
method="AUTOMATIC"
)
return self._wait_for_license_pool_state_to_activate(resource)
def _wait_for_license_pool_state_to_activate(self, pool):
error_values = ['EXPIRED', 'FAILED']
# Wait no more than 5 minutes
for x in range(1, 30):
pool.refresh()
if pool.state == 'LICENSED':
return True
elif pool.state == 'WAITING_FOR_EULA_ACCEPTANCE':
pool.modify(
eulaText=pool.eulaText,
state='ACCEPTED_EULA'
)
elif pool.state in error_values:
raise F5ModuleError(pool.errorText)
            time.sleep(10)
        raise F5ModuleError(
            "Timed out waiting for the license pool to become licensed"
        )
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the license pool")
return True
def remove_from_device(self):
collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
requests_params=dict(
params="$filter=name+eq+'{0}'".format(self.want.name)
)
)
resource = collection.pop()
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
accept_eula=dict(
type='bool',
default='no',
choices=BOOLEANS
),
base_key=dict(
required=False,
no_log=True
),
name=dict(
required=True
),
state=dict(
required=False,
default='present',
choices=['absent', 'present']
)
)
self.f5_product_name = 'iworkflow'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| mcgonagle/ansible_f5 | library_old/iworkflow_license_pool.py | Python | apache-2.0 | 10,879 |
# ./darwinpush/xb/raw/sm.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:8eb48f8f0e727f488907a816c69d6ed98ba221c7
# Generated 2015-04-23 16:42:14.513978 by PyXB version 1.2.4 using Python 3.4.1.final.0
# Namespace http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1 [xmlns:sm]
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:5049f1de-e9cf-11e4-bb50-a0481ca50ab0')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import darwinpush.xb.ct as _ImportedBinding_darwinpush_xb_ct
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}MsgCategoryType
class MsgCategoryType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):
"""The category of operator message"""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'MsgCategoryType')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 15, 1)
_Documentation = 'The category of operator message'
MsgCategoryType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=MsgCategoryType, enum_prefix=None)
MsgCategoryType.Train = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Train', tag='Train')
MsgCategoryType.Station = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Station', tag='Station')
MsgCategoryType.Connections = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Connections', tag='Connections')
MsgCategoryType.System = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='System', tag='System')
MsgCategoryType.Misc = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Misc', tag='Misc')
MsgCategoryType.PriorTrains = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='PriorTrains', tag='PriorTrains')
MsgCategoryType.PriorOther = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='PriorOther', tag='PriorOther')
MsgCategoryType._InitializeFacetMap(MsgCategoryType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'MsgCategoryType', MsgCategoryType)
# Atomic simple type: {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}MsgSeverityType
class MsgSeverityType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):
"""The severity of operator message"""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'MsgSeverityType')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 29, 1)
_Documentation = 'The severity of operator message'
MsgSeverityType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=MsgSeverityType, enum_prefix=None)
MsgSeverityType.n0 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='0', tag='n0')
MsgSeverityType.n1 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='1', tag='n1')
MsgSeverityType.n2 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='2', tag='n2')
MsgSeverityType.n3 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='3', tag='n3')
MsgSeverityType._InitializeFacetMap(MsgSeverityType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'MsgSeverityType', MsgSeverityType)
# Complex type [anonymous] with content type MIXED
class CTD_ANON (pyxb.binding.basis.complexTypeDefinition):
"""The content of the message"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 58, 4)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}p uses Python identifier p
__p = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'p'), 'p', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_httpwww_thalesgroup_comrttiPushPortStationMessagesv1p', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 84, 1), )
p = property(__p.value, __p.set, None, 'Defines an HTML paragraph')
# Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}a uses Python identifier a
__a = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'a'), 'a', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_httpwww_thalesgroup_comrttiPushPortStationMessagesv1a', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1), )
a = property(__a.value, __a.set, None, 'Defines an HTML anchor')
_ElementMap.update({
__p.name() : __p,
__a.name() : __a
})
_AttributeMap.update({
})
# Complex type [anonymous] with content type MIXED
class CTD_ANON_ (pyxb.binding.basis.complexTypeDefinition):
"""Defines an HTML paragraph"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 88, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}a uses Python identifier a
__a = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'a'), 'a', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON__httpwww_thalesgroup_comrttiPushPortStationMessagesv1a', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1), )
a = property(__a.value, __a.set, None, 'Defines an HTML anchor')
_ElementMap.update({
__a.name() : __a
})
_AttributeMap.update({
})
# Complex type [anonymous] with content type SIMPLE
class CTD_ANON_2 (pyxb.binding.basis.complexTypeDefinition):
"""Defines an HTML anchor"""
_TypeDefinition = pyxb.binding.datatypes.string
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 98, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.string
# Attribute href uses Python identifier href
__href = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'href'), 'href', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_2_href', pyxb.binding.datatypes.string, required=True)
__href._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 101, 5)
__href._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 101, 5)
href = property(__href.value, __href.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__href.name() : __href
})
# Complex type {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}StationMessage with content type ELEMENT_ONLY
class StationMessage (pyxb.binding.basis.complexTypeDefinition):
"""Darwin Workstation Station Message"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'StationMessage')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 41, 1)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}Station uses Python identifier Station
__Station = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Station'), 'Station', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_httpwww_thalesgroup_comrttiPushPortStationMessagesv1Station', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3), )
Station = property(__Station.value, __Station.set, None, 'The Stations the message is being applied to')
# Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}Msg uses Python identifier Msg
__Msg = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Msg'), 'Msg', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_httpwww_thalesgroup_comrttiPushPortStationMessagesv1Msg', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 54, 3), )
Msg = property(__Msg.value, __Msg.set, None, 'The content of the message')
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_id', pyxb.binding.datatypes.int, required=True)
__id._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 66, 2)
__id._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 66, 2)
id = property(__id.value, __id.set, None, None)
# Attribute cat uses Python identifier cat
__cat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'cat'), 'cat', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_cat', MsgCategoryType, required=True)
__cat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 67, 2)
__cat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 67, 2)
cat = property(__cat.value, __cat.set, None, 'The category of message')
# Attribute sev uses Python identifier sev
__sev = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'sev'), 'sev', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_sev', MsgSeverityType, required=True)
__sev._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 72, 2)
__sev._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 72, 2)
sev = property(__sev.value, __sev.set, None, 'The severity of the message')
# Attribute suppress uses Python identifier suppress
__suppress = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'suppress'), 'suppress', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_suppress', pyxb.binding.datatypes.boolean, unicode_default='false')
__suppress._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 77, 2)
__suppress._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 77, 2)
suppress = property(__suppress.value, __suppress.set, None, 'Whether the train running information is suppressed to the public')
_ElementMap.update({
__Station.name() : __Station,
__Msg.name() : __Msg
})
_AttributeMap.update({
__id.name() : __id,
__cat.name() : __cat,
__sev.name() : __sev,
__suppress.name() : __suppress
})
Namespace.addCategoryObject('typeBinding', 'StationMessage', StationMessage)
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_3 (pyxb.binding.basis.complexTypeDefinition):
"""The Stations the message is being applied to"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 50, 4)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute crs uses Python identifier crs
__crs = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'crs'), 'crs', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_3_crs', _ImportedBinding_darwinpush_xb_ct.CrsType, required=True)
__crs._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 51, 5)
__crs._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 51, 5)
crs = property(__crs.value, __crs.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__crs.name() : __crs
})
p = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'p'), CTD_ANON_, documentation='Defines an HTML paragraph', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 84, 1))
Namespace.addCategoryObject('elementBinding', p.name().localName(), p)
a = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'a'), CTD_ANON_2, documentation='Defines an HTML anchor', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1))
Namespace.addCategoryObject('elementBinding', a.name().localName(), a)
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'p'), CTD_ANON_, scope=CTD_ANON, documentation='Defines an HTML paragraph', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 84, 1)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'a'), CTD_ANON_2, scope=CTD_ANON, documentation='Defines an HTML anchor', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1)))
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 60, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 61, 6))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'p')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 60, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'a')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 61, 6))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON._Automaton = _BuildAutomaton()
CTD_ANON_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'a'), CTD_ANON_2, scope=CTD_ANON_, documentation='Defines an HTML anchor', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1)))
def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 90, 4))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'a')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 90, 4))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_._Automaton = _BuildAutomaton_()
StationMessage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Station'), CTD_ANON_3, scope=StationMessage, documentation='The Stations the message is being applied to', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3)))
StationMessage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Msg'), CTD_ANON, scope=StationMessage, documentation='The content of the message', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 54, 3)))
def _BuildAutomaton_2 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_2
del _BuildAutomaton_2
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3))
counters.add(cc_0)
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(StationMessage._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Station')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
symbol = pyxb.binding.content.ElementUse(StationMessage._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Msg')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 54, 3))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
StationMessage._Automaton = _BuildAutomaton_2()
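# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): one way these
# bindings are typically consumed. The helper name below is hypothetical, the
# XML text is assumed to be a document whose root element is declared in this
# namespace, and only properties defined above (id, cat, sev, suppress,
# Station.crs) are read.
def _example_read_station_message(xml_text):
    """Parse a station message document and pull out a few of its fields."""
    message = CreateFromDocument(xml_text)
    return {
        'id': message.id,
        'category': message.cat,        # a MsgCategoryType value
        'severity': message.sev,        # a MsgSeverityType value
        'suppressed': message.suppress,
        'stations': [station.crs for station in message.Station],
    }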
| HackTrain/darwinpush | darwinpush/xb/raw/sm.py | Python | apache-2.0 | 22,743 |
from urlparse import urlparse
from api_tests.nodes.views.test_node_contributors_list import NodeCRUDTestCase
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from tests.base import fake
from osf_tests.factories import (
ProjectFactory,
CommentFactory,
RegistrationFactory,
WithdrawnRegistrationFactory,
)
class TestWithdrawnRegistrations(NodeCRUDTestCase):
def setUp(self):
super(TestWithdrawnRegistrations, self).setUp()
self.registration = RegistrationFactory(creator=self.user, project=self.public_project)
self.withdrawn_registration = WithdrawnRegistrationFactory(registration=self.registration, user=self.registration.creator)
self.public_pointer_project = ProjectFactory(is_public=True)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.withdrawn_url = '/{}registrations/{}/?version=2.2'.format(API_BASE, self.registration._id)
self.withdrawn_registration.justification = 'We made a major error.'
self.withdrawn_registration.save()
def test_can_access_withdrawn_contributors(self):
url = '/{}registrations/{}/contributors/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
def test_cannot_access_withdrawn_children(self):
url = '/{}registrations/{}/children/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_comments(self):
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_comment = CommentFactory(node=self.public_project, user=self.user)
url = '/{}registrations/{}/comments/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_can_access_withdrawn_contributor_detail(self):
url = '/{}registrations/{}/contributors/{}/'.format(API_BASE, self.registration._id, self.user._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
def test_cannot_return_a_withdrawn_registration_at_node_detail_endpoint(self):
url = '/{}nodes/{}/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_cannot_delete_a_withdrawn_registration(self):
url = '/{}registrations/{}/'.format(API_BASE, self.registration._id)
res = self.app.delete_json_api(url, auth=self.user.auth, expect_errors=True)
self.registration.reload()
assert_equal(res.status_code, 405)
def test_cannot_access_withdrawn_files_list(self):
url = '/{}registrations/{}/files/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_node_links_detail(self):
url = '/{}registrations/{}/node_links/{}/'.format(API_BASE, self.registration._id, self.public_pointer._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_node_links_list(self):
url = '/{}registrations/{}/node_links/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_node_logs(self):
self.public_project = ProjectFactory(is_public=True, creator=self.user)
url = '/{}registrations/{}/logs/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_registrations_list(self):
self.registration.save()
url = '/{}registrations/{}/registrations/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_withdrawn_registrations_display_limited_fields(self):
registration = self.registration
res = self.app.get(self.withdrawn_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
registration.reload()
expected_attributes = {
'title': registration.title,
'description': registration.description,
'date_created': registration.date_created.isoformat().replace('+00:00', 'Z'),
'date_registered': registration.registered_date.isoformat().replace('+00:00', 'Z'),
'date_modified': registration.date_modified.isoformat().replace('+00:00', 'Z'),
'withdrawal_justification': registration.retraction.justification,
'public': None,
'category': None,
'registration': True,
'fork': None,
'collection': None,
'tags': None,
'withdrawn': True,
'pending_withdrawal': None,
'pending_registration_approval': None,
'pending_embargo_approval': None,
'embargo_end_date': None,
'registered_meta': None,
'current_user_permissions': None,
'registration_supplement': registration.registered_schema.first().name
}
for attribute in expected_attributes:
assert_equal(expected_attributes[attribute], attributes[attribute])
contributors = urlparse(res.json['data']['relationships']['contributors']['links']['related']['href']).path
assert_equal(contributors, '/{}registrations/{}/contributors/'.format(API_BASE, registration._id))
assert_not_in('children', res.json['data']['relationships'])
assert_not_in('comments', res.json['data']['relationships'])
assert_not_in('node_links', res.json['data']['relationships'])
assert_not_in('registrations', res.json['data']['relationships'])
assert_not_in('parent', res.json['data']['relationships'])
assert_not_in('forked_from', res.json['data']['relationships'])
assert_not_in('files', res.json['data']['relationships'])
assert_not_in('logs', res.json['data']['relationships'])
assert_not_in('registered_by', res.json['data']['relationships'])
assert_not_in('registered_from', res.json['data']['relationships'])
assert_not_in('root', res.json['data']['relationships'])
def test_field_specific_related_counts_ignored_if_hidden_field_on_withdrawn_registration(self):
url = '/{}registrations/{}/?related_counts=children'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_not_in('children', res.json['data']['relationships'])
assert_in('contributors', res.json['data']['relationships'])
def test_field_specific_related_counts_retrieved_if_visible_field_on_withdrawn_registration(self):
url = '/{}registrations/{}/?related_counts=contributors'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['relationships']['contributors']['links']['related']['meta']['count'], 1)
| monikagrabowska/osf.io | api_tests/registrations/views/test_withdrawn_registrations.py | Python | apache-2.0 | 7,865 |
from collections import OrderedDict
from app.master.atom_grouper import AtomGrouper
class TimeBasedAtomGrouper(object):
"""
This class implements the algorithm to best split & group atoms based on historic time values. This algorithm is
somewhat complicated, so I'm going to give a summary here.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Let N be the number of concurrent executors allocated for this job.
Let T be the aggregate serial time to execute all atoms on a single executor.
Both N and T are known values at the beginning of this algorithm.
In the ideal subjob atom-grouping, we would have exactly N subjobs, each allocated with T/N amount of work that
would all end at the same time. However, in reality, there are a few factors that makes this solution unfeasible:
- There is a significant amount of variability in the times of running these atoms, so numbers are never exact.
- Certain builds will introduce new tests (for which we don't have historical time data for).
- Not all of the machines are exactly the same, so we can't expect identical performance.
We have two aims for this algorithm:
- Minimize the amount of framework overhead (time spent sending and retrieving subjobs) and maximize the amount of
time the slaves actually spend running the build.
- Don't overload any single executor with too much work--this will cause the whole build to wait on a single
executor. We want to try to get all of the executors to end as close to the same time as possible in order to
get rid of any inefficient use of slave machines.
In order to accomplish this, the algorithm implemented by this class tries to split up the majority of the atoms
into N buckets, and splits up the rest of the atoms into smaller buckets. Hopefully, the timeline graph of
executed subjobs for each of the executors would end up looking like this:
[========================================================================][===][==][==]
[===============================================================================][==]
[====================================================================][====][===][==][=]
[========================================================================][===][==][=]
[=====================================================================][====][==][==]
[==================================================================================][=]
[===================================================================][======][==][==]
The algorithm has two stages of subjob creation: the 'big chunk' stage and the 'small chunk' stage. The 'big chunk'
stage creates exactly N large subjob groupings that will consist of the majority of atoms (in terms of runtime).
The 'small chunk' stage creates ~2N short subjob groupings that will be used to fill in the gaps in order to aim for
having all of the executors end at similar times.
Notes:
    - For new atoms that we don't have historic times for, we will assign them the highest atom time value in order to
avoid underestimating the length of unknown atoms.
- We will have to try tweaking the percentage of T that we want to be allocated for the initial large batch of
big subjobs. Same goes for the number and size of the smaller buckets.
"""
BIG_CHUNK_FRACTION = 0.8
def __init__(self, atoms, max_executors, atom_time_map, project_directory):
"""
:param atoms: the list of atoms for this build
:type atoms: list[app.master.atom.Atom]
:param max_executors: the maximum number of executors for this build
:type max_executors: int
:param atom_time_map: a dictionary containing the historic times for atoms for this particular job
:type atom_time_map: dict[str, float]
:type project_directory: str
"""
self._atoms = atoms
self._max_executors = max_executors
self._atom_time_map = atom_time_map
self._project_directory = project_directory
def groupings(self):
"""
Group the atoms into subjobs using historic timing data.
:return: a list of lists of atoms
:rtype: list[list[app.master.atom.Atom]]
"""
# 1). Coalesce the atoms with historic atom times, and also get total estimated runtime
try:
total_estimated_runtime = self._set_expected_atom_times(
self._atoms, self._atom_time_map, self._project_directory)
except _AtomTimingDataError:
grouper = AtomGrouper(self._atoms, self._max_executors)
return grouper.groupings()
# 2). Sort them by decreasing time, and add them to an OrderedDict
atoms_by_decreasing_time = sorted(self._atoms, key=lambda atom: atom.expected_time, reverse=True)
sorted_atom_times_left = OrderedDict([(atom, atom.expected_time) for atom in atoms_by_decreasing_time])
# 3). Group them!
# Calculate what the target 'big subjob' time is going to be for each executor's initial subjob
big_subjob_time = (total_estimated_runtime * self.BIG_CHUNK_FRACTION) / self._max_executors
# Calculate what the target 'small subjob' time is going to be
small_subjob_time = (total_estimated_runtime * (1.0 - self.BIG_CHUNK_FRACTION)) / (2 * self._max_executors)
# _group_atoms_into_sized_buckets() will remove elements from sorted_atom_times_left.
subjobs = self._group_atoms_into_sized_buckets(sorted_atom_times_left, big_subjob_time, self._max_executors)
small_subjobs = self._group_atoms_into_sized_buckets(sorted_atom_times_left, small_subjob_time, None)
subjobs.extend(small_subjobs)
return subjobs
def _set_expected_atom_times(self, new_atoms, old_atoms_with_times, project_directory):
"""
Set the expected runtime (new_atom.expected_time) of each atom in new_atoms using historic timing data.
Additionally, return the total estimated serial-runtime for this build. Although this seems like an odd thing
for this method to return, it is done here for efficiency. There can be thousands of atoms, and iterating
through them multiple times seems inefficient.
:param new_atoms: the list of atoms that will be run in this build
:type new_atoms: list[app.master.atom.Atom]
:param old_atoms_with_times: a dictionary containing the historic times for atoms for this particular job
:type old_atoms_with_times: dict[str, float]
:type project_directory: str
:return: the total estimated runtime in seconds
:rtype: float
"""
atoms_without_timing_data = []
total_time = 0
max_atom_time = 0
# Generate list for atoms that have timing data
for new_atom in new_atoms:
if new_atom.command_string not in old_atoms_with_times:
atoms_without_timing_data.append(new_atom)
continue
new_atom.expected_time = old_atoms_with_times[new_atom.command_string]
# Discover largest single atom time to use as conservative estimates for atoms with unknown times
if max_atom_time < new_atom.expected_time:
max_atom_time = new_atom.expected_time
# We want to return the atom with the project directory still in it, as this data will directly be
# sent to the slave to be run.
total_time += new_atom.expected_time
# For the atoms without historic timing data, assign them the largest atom time we have
for new_atom in atoms_without_timing_data:
new_atom.expected_time = max_atom_time
if len(new_atoms) == len(atoms_without_timing_data):
raise _AtomTimingDataError
total_time += (max_atom_time * len(atoms_without_timing_data))
return total_time
def _group_atoms_into_sized_buckets(self, sorted_atom_time_dict, target_group_time, max_groups_to_create):
"""
        Given an OrderedDict of [atom, time] pairs sorted by decreasing time in sorted_atom_time_dict, return a list
of lists of atoms that each are estimated to take target_group_time seconds. This method will generate at most
max_groups_to_create groupings, and will return once this limit is reached or when sorted_atom_time_dict is
empty.
Note, this method will modify sorted_atom_time_dict's state by removing elements as needed (often from the
middle of the collection).
        :param sorted_atom_time_dict: the OrderedDict of [atom, time] pairs, sorted with the longest times first.
            This method removes elements from this OrderedDict as atoms are grouped.
:type sorted_atom_time_dict: OrderedDict[app.master.atom.Atom, float]
:param target_group_time: how long each subjob should approximately take
:type target_group_time: float
:param max_groups_to_create: the maximum number of subjobs to create. Once max_groups_to_create limit is
reached, this method will return the subjobs that have already been grouped. If set to None, then there
is no limit.
:type max_groups_to_create: int|None
:return: the groups of grouped atoms, with each group taking an estimated target_group_time
:rtype: list[list[app.master.atom.Atom]]
"""
subjobs = []
subjob_time_so_far = 0
subjob_atoms = []
while (max_groups_to_create is None or len(subjobs) < max_groups_to_create) and len(sorted_atom_time_dict) > 0:
            for atom, time in list(sorted_atom_time_dict.items()):
if len(subjob_atoms) == 0 or (time + subjob_time_so_far) <= target_group_time:
subjob_time_so_far += time
subjob_atoms.append(atom)
sorted_atom_time_dict.pop(atom)
# If (number of subjobs created so far + atoms left) is less than or equal to the total number of
# subjobs we need to create, then have each remaining atom be a subjob and return.
# The "+ 1" is here to account for the current subjob being generated, but that hasn't been
# appended to subjobs yet.
if max_groups_to_create is not None and (len(subjobs) + len(sorted_atom_time_dict) + 1) <= max_groups_to_create:
subjobs.append(subjob_atoms)
                        for atom, _ in list(sorted_atom_time_dict.items()):
sorted_atom_time_dict.pop(atom)
subjobs.append([atom])
return subjobs
subjobs.append(subjob_atoms)
subjob_atoms = []
subjob_time_so_far = 0
return subjobs
class _AtomTimingDataError(Exception):
"""
An exception to represent the case where the atom timing data is either not present or incorrect.
"""
| nickzuber/ClusterRunner | app/master/time_based_atom_grouper.py | Python | apache-2.0 | 11,090 |
"""
Tests for the integration test suite itself.
"""
import logging
import os
import subprocess
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Set
import yaml
from get_test_group import patterns_from_group
__maintainer__ = 'adam'
__contact__ = '[email protected]'
log = logging.getLogger(__file__)
def _tests_from_pattern(ci_pattern: str) -> Set[str]:
"""
From a CI pattern, get all tests ``pytest`` would collect.
"""
tests = set([]) # type: Set[str]
args = [
'pytest',
'--disable-pytest-warnings',
'--collect-only',
ci_pattern,
'-q',
]
# Test names will not be in ``stderr`` so we ignore that.
result = subprocess.run(
args=args,
stdout=subprocess.PIPE,
env={**os.environ, **{'PYTHONIOENCODING': 'UTF-8'}},
)
output = result.stdout
for line in output.splitlines():
if b'error in' in line:
message = (
'Error collecting tests for pattern "{ci_pattern}". '
'Full output:\n'
'{output}'
).format(
ci_pattern=ci_pattern,
output=output,
)
raise Exception(message)
# Whitespace is important to avoid confusing pytest warning messages
# with test names. For example, the pytest output may contain '3 tests
# deselected' which would conflict with a test file called
# test_agent_deselected.py if we ignored whitespace.
if (
line and
# Some tests show warnings on collection.
b' warnings' not in line and
# Some tests are skipped on collection.
b'skipped in' not in line and
# Some tests are deselected by the ``pytest.ini`` configuration.
b' deselected' not in line and
not line.startswith(b'no tests ran in')
):
tests.add(line.decode())
return tests
def test_test_groups() -> None:
"""
The test suite is split into various "groups".
This test confirms that the groups together contain all tests, and each
test is collected only once.
"""
test_group_file = Path('test_groups.yaml')
test_group_file_contents = test_group_file.read_text()
    test_groups = yaml.safe_load(test_group_file_contents)['groups']
test_patterns = []
for group in test_groups:
test_patterns += patterns_from_group(group_name=group)
    # Map each collected test to the list of patterns that collect it, so we can
    # canonically state whether every test was collected exactly once.
    tests_to_patterns = defaultdict(list)  # type: Mapping[str, List]
for pattern in test_patterns:
tests = _tests_from_pattern(ci_pattern=pattern)
for test in tests:
tests_to_patterns[test].append(pattern)
errs = []
for test_name, patterns in tests_to_patterns.items():
message = (
'Test "{test_name}" will be run once for each pattern in '
'{patterns}. '
'Each test should be run only once.'
).format(
test_name=test_name,
patterns=patterns,
)
        if len(patterns) != 1:
            errs.append(message)
if errs:
for message in errs:
log.error(message)
raise Exception("Some tests are not collected exactly once, see errors.")
all_tests = _tests_from_pattern(ci_pattern='')
assert tests_to_patterns.keys() - all_tests == set()
assert all_tests - tests_to_patterns.keys() == set()
| GoelDeepak/dcos | packages/dcos-integration-test/extra/test_meta.py | Python | apache-2.0 | 3,623 |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import webob.exc as wexc
from neutron.api.v2 import base
from neutron.common import constants as n_const
from neutron import context
from neutron.extensions import portbindings
from neutron.manager import NeutronManager
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import config as ml2_config
from neutron.plugins.ml2.drivers.cisco.nexus import config as cisco_config
from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc
from neutron.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver
from neutron.plugins.ml2.drivers import type_vlan as vlan_config
from neutron.tests.unit import test_db_plugin
LOG = logging.getLogger(__name__)
ML2_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
PHYS_NET = 'physnet1'
COMP_HOST_NAME = 'testhost'
COMP_HOST_NAME_2 = 'testhost_2'
VLAN_START = 1000
VLAN_END = 1100
NEXUS_IP_ADDR = '1.1.1.1'
NETWORK_NAME = 'test_network'
NETWORK_NAME_2 = 'test_network_2'
NEXUS_INTERFACE = '1/1'
NEXUS_INTERFACE_2 = '1/2'
CIDR_1 = '10.0.0.0/24'
CIDR_2 = '10.0.1.0/24'
DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111'
DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222'
DEVICE_OWNER = 'compute:None'
class CiscoML2MechanismTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
"""Configure for end-to-end neutron testing using a mock ncclient.
This setup includes:
- Configure the ML2 plugin to use VLANs in the range of 1000-1100.
- Configure the Cisco mechanism driver to use an imaginary switch
at NEXUS_IP_ADDR.
- Create a mock NETCONF client (ncclient) for the Cisco mechanism
driver
"""
self.addCleanup(mock.patch.stopall)
# Configure the ML2 mechanism drivers and network types
ml2_opts = {
'mechanism_drivers': ['cisco_nexus'],
'tenant_network_types': ['vlan'],
}
for opt, val in ml2_opts.items():
ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
self.addCleanup(ml2_config.cfg.CONF.reset)
# Configure the ML2 VLAN parameters
phys_vrange = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)])
vlan_config.cfg.CONF.set_override('network_vlan_ranges',
[phys_vrange],
'ml2_type_vlan')
self.addCleanup(vlan_config.cfg.CONF.reset)
# Configure the Cisco Nexus mechanism driver
nexus_config = {
(NEXUS_IP_ADDR, 'username'): 'admin',
(NEXUS_IP_ADDR, 'password'): 'mySecretPassword',
(NEXUS_IP_ADDR, 'ssh_port'): 22,
(NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE,
(NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2}
nexus_patch = mock.patch.dict(
cisco_config.ML2MechCiscoConfig.nexus_dict,
nexus_config)
nexus_patch.start()
self.addCleanup(nexus_patch.stop)
# The NETCONF client module is not included in the DevStack
# distribution, so mock this module for unit testing.
self.mock_ncclient = mock.Mock()
mock.patch.object(nexus_network_driver.CiscoNexusDriver,
'_import_ncclient',
return_value=self.mock_ncclient).start()
# Mock port values for 'status' and 'binding:segmentation_id'
mock_status = mock.patch.object(
mech_cisco_nexus.CiscoNexusMechanismDriver,
'_is_status_active').start()
mock_status.return_value = n_const.PORT_STATUS_ACTIVE
def _mock_get_vlanid(context):
network = context.network.current
if network['name'] == NETWORK_NAME:
return VLAN_START
else:
return VLAN_START + 1
mock_vlanid = mock.patch.object(
mech_cisco_nexus.CiscoNexusMechanismDriver,
'_get_vlanid').start()
mock_vlanid.side_effect = _mock_get_vlanid
super(CiscoML2MechanismTestCase, self).setUp(ML2_PLUGIN)
self.port_create_status = 'DOWN'
@contextlib.contextmanager
def _patch_ncclient(self, attr, value):
"""Configure an attribute on the mock ncclient module.
This method can be used to inject errors by setting a side effect
or a return value for an ncclient method.
:param attr: ncclient attribute (typically method) to be configured.
:param value: Value to be configured on the attribute.
"""
# Configure attribute.
config = {attr: value}
self.mock_ncclient.configure_mock(**config)
# Continue testing
yield
# Unconfigure attribute
config = {attr: None}
self.mock_ncclient.configure_mock(**config)
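    # Hedged usage sketch (mirrors the tests further below): inject a simulated
    # connection failure on the mocked ncclient for the duration of a with-block.
    #
    #     with self._patch_ncclient('connect.side_effect', AttributeError):
    #         ...  # operations here observe the injected failure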
def _is_in_nexus_cfg(self, words):
"""Check if any config sent to Nexus contains all words in a list."""
for call in (self.mock_ncclient.connect.return_value.
edit_config.mock_calls):
configlet = call[2]['config']
if all(word in configlet for word in words):
return True
return False
def _is_in_last_nexus_cfg(self, words):
"""Confirm last config sent to Nexus contains specified keywords."""
last_cfg = (self.mock_ncclient.connect.return_value.
edit_config.mock_calls[-1][2]['config'])
return all(word in last_cfg for word in words)
def _is_vlan_configured(self, vlan_creation_expected=True,
add_keyword_expected=False):
vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name'])
add_appears = self._is_in_last_nexus_cfg(['add'])
return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and
vlan_created == vlan_creation_expected and
add_appears == add_keyword_expected)
def _is_vlan_unconfigured(self, vlan_deletion_expected=True):
vlan_deleted = self._is_in_last_nexus_cfg(
['no', 'vlan', 'vlan-id-create-delete'])
return (self._is_in_nexus_cfg(['allowed', 'vlan', 'remove']) and
vlan_deleted == vlan_deletion_expected)
class TestCiscoBasicGet(CiscoML2MechanismTestCase,
test_db_plugin.TestBasicGet):
pass
class TestCiscoV2HTTPResponse(CiscoML2MechanismTestCase,
test_db_plugin.TestV2HTTPResponse):
pass
class TestCiscoPortsV2(CiscoML2MechanismTestCase,
test_db_plugin.TestPortsV2):
@contextlib.contextmanager
def _create_resources(self, name=NETWORK_NAME, cidr=CIDR_1,
device_id=DEVICE_ID_1,
host_id=COMP_HOST_NAME):
"""Create network, subnet, and port resources for test cases.
Create a network, subnet, port and then update the port, yield the
result, then delete the port, subnet and network.
:param name: Name of network to be created.
:param cidr: cidr address of subnetwork to be created.
:param device_id: Device ID to use for port to be created/updated.
:param host_id: Host ID to use for port create/update.
"""
with self.network(name=name) as network:
with self.subnet(network=network, cidr=cidr) as subnet:
with self.port(subnet=subnet, cidr=cidr) as port:
data = {'port': {portbindings.HOST_ID: host_id,
'device_id': device_id,
'device_owner': 'compute:none',
'admin_state_up': True}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = req.get_response(self.api)
yield res.status_int
def _assertExpectedHTTP(self, status, exc):
"""Confirm that an HTTP status corresponds to an expected exception.
        Confirm that an HTTP status which has been returned for a
neutron API request matches the HTTP status corresponding
to an expected exception.
:param status: HTTP status
:param exc: Expected exception
"""
if exc in base.FAULT_MAP:
expected_http = base.FAULT_MAP[exc].code
else:
expected_http = wexc.HTTPInternalServerError.code
self.assertEqual(status, expected_http)
def test_create_ports_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2,
net['network']['id'],
'test',
True)
# Expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_emulated(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
ctx = context.get_admin_context()
with self.network() as net:
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True, context=ctx)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
    def test_nexus_enable_vlan_cmd(self):
        """Verify the syntax of the command to enable a VLAN on an interface.
Confirm that for the first VLAN configured on a Nexus interface,
the command string sent to the switch does not contain the
keyword 'add'.
Confirm that for the second VLAN configured on a Nexus interface,
the command string sent to the switch contains the keyword 'add'.
"""
# First vlan should be configured without 'add' keyword
with self._create_resources():
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
# Second vlan should be configured with 'add' keyword
with self._create_resources(name=NETWORK_NAME_2,
device_id=DEVICE_ID_2,
cidr=CIDR_2):
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=True))
def test_nexus_connect_fail(self):
"""Test failure to connect to a Nexus switch.
While creating a network, subnet, and port, simulate a connection
failure to a nexus switch. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient('connect.side_effect',
AttributeError):
with self._create_resources() as result_status:
self._assertExpectedHTTP(result_status,
c_exc.NexusConnectFailed)
def test_nexus_vlan_config_two_hosts(self):
"""Verify config/unconfig of vlan on two compute hosts."""
@contextlib.contextmanager
def _create_port_check_vlan(comp_host_name, device_id,
vlan_creation_expected=True):
with self.port(subnet=subnet, fmt=self.fmt) as port:
data = {'port': {portbindings.HOST_ID: comp_host_name,
'device_id': device_id,
'device_owner': DEVICE_OWNER,
'admin_state_up': True}}
req = self.new_update_request('ports', data,
port['port']['id'])
req.get_response(self.api)
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=vlan_creation_expected,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
yield
# Create network and subnet
with self.network(name=NETWORK_NAME) as network:
with self.subnet(network=network, cidr=CIDR_1) as subnet:
# Create an instance on first compute host
with _create_port_check_vlan(COMP_HOST_NAME, DEVICE_ID_1,
vlan_creation_expected=True):
# Create an instance on second compute host
with _create_port_check_vlan(COMP_HOST_NAME_2, DEVICE_ID_2,
vlan_creation_expected=False):
pass
# Instance on second host is now terminated.
# Vlan should be untrunked from port, but vlan should
# still exist on the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=False))
self.mock_ncclient.reset_mock()
# Instance on first host is now terminated.
# Vlan should be untrunked from port and vlan should have
# been deleted from the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=True))
def test_nexus_config_fail(self):
"""Test a Nexus switch configuration failure.
While creating a network, subnet, and port, simulate a nexus
switch configuration error. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
AttributeError):
with self._create_resources() as result_status:
self._assertExpectedHTTP(result_status,
c_exc.NexusConfigFailed)
def test_nexus_extended_vlan_range_failure(self):
"""Test that extended VLAN range config errors are ignored.
Some versions of Nexus switch do not allow state changes for
the extended VLAN range (1006-4094), but these errors can be
ignored (default values are appropriate). Test that such errors
are ignored by the Nexus plugin.
"""
def mock_edit_config_a(target, config):
if all(word in config for word in ['state', 'active']):
raise Exception("Can't modify state for extended")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_a):
with self._create_resources() as result_status:
self.assertEqual(result_status, wexc.HTTPOk.code)
def mock_edit_config_b(target, config):
if all(word in config for word in ['no', 'shutdown']):
raise Exception("Command is only allowed on VLAN")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_b):
with self._create_resources() as result_status:
self.assertEqual(result_status, wexc.HTTPOk.code)
def test_nexus_vlan_config_rollback(self):
"""Test rollback following Nexus VLAN state config failure.
Test that the Cisco Nexus plugin correctly deletes the VLAN
on the Nexus switch when the 'state active' command fails (for
a reason other than state configuration change is rejected
for the extended VLAN range).
"""
def mock_edit_config(target, config):
if all(word in config for word in ['state', 'active']):
raise ValueError
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config):
with self._create_resources() as result_status:
# Confirm that the last configuration sent to the Nexus
# switch was deletion of the VLAN.
self.assertTrue(self._is_in_last_nexus_cfg(['<no>', '<vlan>']))
self._assertExpectedHTTP(result_status,
c_exc.NexusConfigFailed)
def test_nexus_host_not_configured(self):
"""Test handling of a NexusComputeHostNotConfigured exception.
Test the Cisco NexusComputeHostNotConfigured exception by using
a fictitious host name during port creation.
"""
with self._create_resources(host_id='fake_host') as result_status:
self._assertExpectedHTTP(result_status,
c_exc.NexusComputeHostNotConfigured)
def test_nexus_missing_fields(self):
"""Test handling of a NexusMissingRequiredFields exception.
Test the Cisco NexusMissingRequiredFields exception by using
empty host_id and device_id values during port creation.
"""
with self._create_resources(device_id='', host_id='') as result_status:
self._assertExpectedHTTP(result_status,
c_exc.NexusMissingRequiredFields)
class TestCiscoNetworksV2(CiscoML2MechanismTestCase,
test_db_plugin.TestNetworksV2):
def test_create_networks_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_network
        #ensures the API chooses the emulation code path
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with mock.patch.object(plugin_obj,
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
LOG.debug("response is %s" % res)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
def test_create_networks_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_network
with mock.patch.object(plugin_obj,
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
class TestCiscoSubnetsV2(CiscoML2MechanismTestCase,
test_db_plugin.TestSubnetsV2):
def test_create_subnets_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
        #ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_subnet
with mock.patch.object(plugin_obj,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
def test_create_subnets_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_subnet
with mock.patch.object(plugin_obj,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
class TestCiscoPortsV2XML(TestCiscoPortsV2):
fmt = 'xml'
class TestCiscoNetworksV2XML(TestCiscoNetworksV2):
fmt = 'xml'
class TestCiscoSubnetsV2XML(TestCiscoSubnetsV2):
fmt = 'xml'
| Juniper/neutron | neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_mech.py | Python | apache-2.0 | 24,807 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''Unit tests for the Dataset.py module'''
import unittest
from ocw.dataset import Dataset, Bounds
import numpy as np
import datetime as dt
class TestDatasetAttributes(unittest.TestCase):
def setUp(self):
self.lat = np.array([10, 12, 14, 16, 18])
self.lon = np.array([100, 102, 104, 106, 108])
self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
flat_array = np.array(range(300))
self.value = flat_array.reshape(12, 5, 5)
self.variable = 'prec'
self.name = 'foo'
self.origin = {'path': '/a/fake/file/path'}
self.test_dataset = Dataset(self.lat,
self.lon,
self.time,
self.value,
variable=self.variable,
name=self.name,
origin=self.origin)
def test_lats(self):
self.assertItemsEqual(self.test_dataset.lats, self.lat)
def test_lons(self):
self.assertItemsEqual(self.test_dataset.lons, self.lon)
def test_times(self):
self.assertItemsEqual(self.test_dataset.times, self.time)
def test_values(self):
self.assertEqual(self.test_dataset.values.all(), self.value.all())
def test_variable(self):
self.assertEqual(self.test_dataset.variable, self.variable)
def test_name(self):
self.assertEqual(self.test_dataset.name, self.name)
def test_origin(self):
self.assertEqual(self.test_dataset.origin, self.origin)
class TestInvalidDatasetInit(unittest.TestCase):
def setUp(self):
self.lat = np.array([10, 12, 14, 16, 18])
self.lon = np.array([100, 102, 104, 106, 108])
self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
flat_array = np.array(range(300))
self.value = flat_array.reshape(12, 5, 5)
self.values_in_wrong_order = flat_array.reshape(5, 5, 12)
def test_bad_lat_shape(self):
self.lat = np.array([[1, 2], [3, 4]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_bad_lon_shape(self):
self.lon = np.array([[1, 2], [3, 4]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_bad_times_shape(self):
self.time = np.array([[1, 2], [3, 4]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_bad_values_shape(self):
self.value = np.array([1, 2, 3, 4, 5])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_values_shape_mismatch(self):
# If we change lats to this the shape of value will not match
# up with the length of the lats array.
self.lat = self.lat[:-2]
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_values_given_in_wrong_order(self):
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.values_in_wrong_order)
def test_lons_values_incorrectly_gridded(self):
times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
lats = np.arange(-30, 30)
bad_lons = np.arange(360)
flat_array = np.arange(len(times) * len(lats) * len(bad_lons))
values = flat_array.reshape(len(times), len(lats), len(bad_lons))
ds = Dataset(lats, bad_lons, times, values)
np.testing.assert_array_equal(ds.lons, np.arange(-180, 180))
def test_reversed_lats(self):
ds = Dataset(self.lat[::-1], self.lon, self.time, self.value)
np.testing.assert_array_equal(ds.lats, self.lat)
class TestDatasetFunctions(unittest.TestCase):
def setUp(self):
self.lat = np.array([10, 12, 14, 16, 18])
self.lon = np.array([100, 102, 104, 106, 108])
self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
flat_array = np.array(range(300))
self.value = flat_array.reshape(12, 5, 5)
self.variable = 'prec'
self.test_dataset = Dataset(self.lat, self.lon, self.time,
self.value, self.variable)
def test_spatial_boundaries(self):
self.assertEqual(
self.test_dataset.spatial_boundaries(),
(min(self.lat), max(self.lat), min(self.lon), max(self.lon)))
def test_time_range(self):
self.assertEqual(
self.test_dataset.time_range(),
(dt.datetime(2000, 1, 1), dt.datetime(2000, 12, 1)))
def test_spatial_resolution(self):
self.assertEqual(self.test_dataset.spatial_resolution(), (2, 2))
def test_temporal_resolution(self):
self.assertEqual(self.test_dataset.temporal_resolution(), 'monthly')
class TestBounds(unittest.TestCase):
def setUp(self):
self.bounds = Bounds(-80, 80, # Lats
-160, 160, # Lons
dt.datetime(2000, 1, 1), # Start time
dt.datetime(2002, 1, 1)) # End time
# Latitude tests
def test_inverted_min_max_lat(self):
with self.assertRaises(ValueError):
self.bounds.lat_min = 81
with self.assertRaises(ValueError):
self.bounds.lat_max = -81
# Lat Min
def test_out_of_bounds_lat_min(self):
with self.assertRaises(ValueError):
self.bounds.lat_min = -91
with self.assertRaises(ValueError):
self.bounds.lat_min = 91
# Lat Max
def test_out_of_bounds_lat_max(self):
with self.assertRaises(ValueError):
self.bounds.lat_max = -91
with self.assertRaises(ValueError):
self.bounds.lat_max = 91
# Longitude tests
    def test_inverted_min_max_lon(self):
with self.assertRaises(ValueError):
self.bounds.lon_min = 161
with self.assertRaises(ValueError):
self.bounds.lon_max = -161
# Lon Min
def test_out_of_bounds_lon_min(self):
with self.assertRaises(ValueError):
self.bounds.lon_min = -181
with self.assertRaises(ValueError):
self.bounds.lon_min = 181
# Lon Max
def test_out_of_bounds_lon_max(self):
with self.assertRaises(ValueError):
self.bounds.lon_max = -181
with self.assertRaises(ValueError):
self.bounds.lon_max = 181
# Temporal tests
def test_inverted_start_end_times(self):
with self.assertRaises(ValueError):
self.bounds.start = dt.datetime(2003, 1, 1)
with self.assertRaises(ValueError):
self.bounds.end = dt.datetime(1999, 1, 1)
# Start tests
def test_invalid_start(self):
with self.assertRaises(ValueError):
self.bounds.start = "This is not a date time object"
# End tests
def test_invalid_end(self):
with self.assertRaises(ValueError):
self.bounds.end = "This is not a date time object"
if __name__ == '__main__':
unittest.main()
| MJJoyce/climate | ocw/tests/test_dataset.py | Python | apache-2.0 | 8,091 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import sys
import os
from datetime import date
# eventlet/gevent should not monkey patch anything.
os.environ["GEVENT_NOPATCH"] = "yes"
os.environ["EVENTLET_NOPATCH"] = "yes"
#os.environ["CELERY_LOADER"] = "default"
this = os.path.dirname(os.path.abspath(__file__))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.join(os.pardir, "tests"))
sys.path.append(os.path.join(this, "_ext"))
#import celery
# General configuration
# ---------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.intersphinx',
]
html_show_sphinx = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Apache Flume'
copyright = '2009-%s The Apache Software Foundation' % date.today().year
keep_warnings = True
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ".".join(map(str, celery.VERSION[0:2]))
# The full version, including alpha/beta/rc tags.
#release = celery.__version__
exclude_trees = ['.build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
#intersphinx_mapping = {
# "http://docs.python.org/dev": None,
# "http://kombu.readthedocs.org/en/latest/": None,
# "http://django-celery.readthedocs.org/en/latest": None,
#}
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
highlight_language = 'none'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['../resources/images']
html_logo = 'images/flume-logo.png'
html_use_smartypants = True
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
html_use_index = True
#html_theme = 'default'
html_sidebars = {
'**': ['localtoc.html', 'relations.html', 'sourcelink.html'],
}
| tmgstevens/flume | flume-ng-doc/sphinx/conf.py | Python | apache-2.0 | 3,274 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Cauchy distribution"""
__all__ = ['Cauchy']
from numbers import Number
from numpy import nan, pi
from .constraint import Real
from .distribution import Distribution
from .utils import sample_n_shape_converter
from .... import np
class Cauchy(Distribution):
    r"""Create a Cauchy distribution object.
Parameters
----------
loc : Tensor or scalar, default 0
mode or median of the distribution
scale : Tensor or scalar, default 1
half width at half maximum
"""
# pylint: disable=abstract-method
has_grad = True
support = Real()
arg_constraints = {'loc': Real(), 'scale': Real()}
def __init__(self, loc=0.0, scale=1.0, validate_args=None):
self.loc = loc
self.scale = scale
super(Cauchy, self).__init__(
event_dim=0, validate_args=validate_args)
@property
def mean(self):
return nan
@property
def variance(self):
return nan
def sample(self, size=None):
# TODO: Implement sampling op in the backend.
# `np.zeros_like` does not support scalar at this moment.
        if isinstance(self.loc, Number) and isinstance(self.scale, Number):
u = np.random.uniform(size=size)
else:
u = np.random.uniform(np.zeros_like( # pylint: disable=too-many-function-args
self.loc + self.scale), size=size)
return self.icdf(u)
def sample_n(self, size=None):
return self.sample(sample_n_shape_converter(size))
def log_prob(self, value):
if self._validate_args:
self._validate_samples(value)
return (-np.log(pi) - np.log(self.scale) -
np.log(1 + ((value - self.loc) / self.scale) ** 2))
def cdf(self, value):
if self._validate_args:
self._validate_samples(value)
return np.arctan((value - self.loc) / self.scale) / pi + 0.5
def icdf(self, value):
return np.tan(pi * (value - 0.5)) * self.scale + self.loc
def entropy(self):
return np.log(4 * pi) + np.log(self.scale)
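# --- Hedged illustration (not part of the public MXNet API) ---
# ``Cauchy.sample`` above uses inverse-transform sampling: draw u ~ Uniform(0, 1)
# and push it through the quantile function icdf(u) = loc + scale * tan(pi * (u - 0.5)).
# The helper below restates that in one place; its name is hypothetical and it is
# not referenced anywhere else in this module.
def _inverse_transform_sampling_sketch(loc=0.0, scale=1.0, size=None):
    """Draw Cauchy samples via the inverse CDF, mirroring ``Cauchy.sample``."""
    u = np.random.uniform(size=size)
    return np.tan(pi * (u - 0.5)) * scale + loc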
| szha/mxnet | python/mxnet/gluon/probability/distributions/cauchy.py | Python | apache-2.0 | 2,935 |
from django.conf.urls import url
from admin.nodes import views
app_name = 'admin'
urlpatterns = [
url(r'^$', views.NodeFormView.as_view(),
name='search'),
url(r'^flagged_spam$', views.NodeFlaggedSpamList.as_view(),
name='flagged-spam'),
url(r'^known_spam$', views.NodeKnownSpamList.as_view(),
name='known-spam'),
url(r'^known_ham$', views.NodeKnownHamList.as_view(),
name='known-ham'),
url(r'^(?P<guid>[a-z0-9]+)/$', views.NodeView.as_view(),
name='node'),
url(r'^(?P<guid>[a-z0-9]+)/logs/$', views.AdminNodeLogView.as_view(),
name='node-logs'),
url(r'^registration_list/$', views.RegistrationListView.as_view(),
name='registrations'),
url(r'^stuck_registration_list/$', views.StuckRegistrationListView.as_view(),
name='stuck-registrations'),
url(r'^(?P<guid>[a-z0-9]+)/update_embargo/$',
views.RegistrationUpdateEmbargoView.as_view(), name='update_embargo'),
url(r'^(?P<guid>[a-z0-9]+)/remove/$', views.NodeDeleteView.as_view(),
name='remove'),
url(r'^(?P<guid>[a-z0-9]+)/restore/$', views.NodeDeleteView.as_view(),
name='restore'),
url(r'^(?P<guid>[a-z0-9]+)/confirm_spam/$', views.NodeConfirmSpamView.as_view(),
name='confirm-spam'),
url(r'^(?P<guid>[a-z0-9]+)/confirm_ham/$', views.NodeConfirmHamView.as_view(),
name='confirm-ham'),
url(r'^(?P<guid>[a-z0-9]+)/reindex_share_node/$', views.NodeReindexShare.as_view(),
name='reindex-share-node'),
url(r'^(?P<guid>[a-z0-9]+)/reindex_elastic_node/$', views.NodeReindexElastic.as_view(),
name='reindex-elastic-node'),
url(r'^(?P<guid>[a-z0-9]+)/restart_stuck_registrations/$', views.RestartStuckRegistrationsView.as_view(),
name='restart-stuck-registrations'),
url(r'^(?P<guid>[a-z0-9]+)/remove_stuck_registrations/$', views.RemoveStuckRegistrationsView.as_view(),
name='remove-stuck-registrations'),
url(r'^(?P<guid>[a-z0-9]+)/remove_user/(?P<user_id>[a-z0-9]+)/$',
views.NodeRemoveContributorView.as_view(), name='remove_user'),
]
| pattisdr/osf.io | admin/nodes/urls.py | Python | apache-2.0 | 2,100 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DummyOperator(BaseOperator):
"""
Operator that does literally nothing. It can be used to group tasks in a
DAG.
"""
ui_color = '#e8f7e4'
@apply_defaults
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def execute(self, context):
pass
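# Hedged usage sketch (not part of this module; DAG id and dates are placeholders):
#
#     from airflow import DAG
#     from airflow.operators.dummy_operator import DummyOperator
#     from datetime import datetime
#
#     with DAG("grouping_example", start_date=datetime(2020, 1, 1), schedule_interval=None) as dag:
#         start = DummyOperator(task_id="start")
#         end = DummyOperator(task_id="end")
#         start >> end  # no-op tasks acting as explicit start/end markers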
| wileeam/airflow | airflow/operators/dummy_operator.py | Python | apache-2.0 | 1,203 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from lxml import objectify, etree
from django.contrib.auth.models import Group, User
from useradmin.models import HuePermission, GroupPermission, get_default_user_group
from hadoop import cluster
from desktop.lib import fsmanager
def grant_access(username, groupname, appname):
add_permission(username, groupname, 'access', appname)
def add_permission(username, groupname, permname, appname):
user = User.objects.get(username=username)
group, created = Group.objects.get_or_create(name=groupname)
perm, created = HuePermission.objects.get_or_create(app=appname, action=permname)
GroupPermission.objects.get_or_create(group=group, hue_permission=perm)
if not user.groups.filter(name=group.name).exists():
user.groups.add(group)
user.save()
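# Hedged usage sketch (user, group and app names are illustrative): tests
# typically call these helpers as
#
#     grant_access('test_user', 'test_group', 'beeswax')  # allow app access
#     add_to_group('test_user')                           # default group membership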
def add_to_group(username, groupname=None):
if groupname is None:
group = get_default_user_group()
assert group is not None
groupname = group.name
user = User.objects.get(username=username)
group, created = Group.objects.get_or_create(name=groupname)
if not user.groups.filter(name=group.name).exists():
user.groups.add(group)
user.save()
def remove_from_group(username, groupname):
user = User.objects.get(username=username)
group, created = Group.objects.get_or_create(name=groupname)
if user.groups.filter(name=group.name).exists():
user.groups.remove(group)
user.save()
def reformat_json(json_obj):
if isinstance(json_obj, basestring):
return json.dumps(json.loads(json_obj))
else:
return json.dumps(json_obj)
def reformat_xml(xml_obj):
if isinstance(xml_obj, basestring):
return etree.tostring(objectify.fromstring(xml_obj, etree.XMLParser(strip_cdata=False, remove_blank_text=True)))
else:
return etree.tostring(xml_obj)
def clear_sys_caches():
return cluster.clear_caches(), fsmanager.clear_cache()
def restore_sys_caches(old_caches):
cluster.restore_caches(old_caches[0])
    fsmanager.restore_cache(old_caches[1])
| Peddle/hue | desktop/core/src/desktop/lib/test_utils.py | Python | apache-2.0 | 2,861 |
"""Support for the Foobot indoor air quality monitor."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
from foobot_async import FoobotClient
import voluptuous as vol
from homeassistant.const import (
ATTR_TEMPERATURE,
ATTR_TIME,
CONF_TOKEN,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_HUMIDITY = "humidity"
ATTR_PM2_5 = "PM2.5"
ATTR_CARBON_DIOXIDE = "CO2"
ATTR_VOLATILE_ORGANIC_COMPOUNDS = "VOC"
ATTR_FOOBOT_INDEX = "index"
SENSOR_TYPES = {
"time": [ATTR_TIME, "s"],
"pm": [ATTR_PM2_5, "µg/m3", "mdi:cloud"],
"tmp": [ATTR_TEMPERATURE, TEMP_CELSIUS, "mdi:thermometer"],
"hum": [ATTR_HUMIDITY, "%", "mdi:water-percent"],
"co2": [ATTR_CARBON_DIOXIDE, "ppm", "mdi:periodic-table-co2"],
"voc": [ATTR_VOLATILE_ORGANIC_COMPOUNDS, "ppb", "mdi:cloud"],
"allpollu": [ATTR_FOOBOT_INDEX, "%", "mdi:percent"],
}
SCAN_INTERVAL = timedelta(minutes=10)
PARALLEL_UPDATES = 1
TIMEOUT = 10
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_TOKEN): cv.string, vol.Required(CONF_USERNAME): cv.string}
)
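# Hedged configuration sketch (values are placeholders, not real credentials):
#
#     sensor:
#       - platform: foobot
#         username: YOUR_FOOBOT_EMAIL
#         token: YOUR_FOOBOT_API_KEY
#
# Only ``username`` and ``token`` are required by the schema above; one entity is
# created per device and per entry in SENSOR_TYPES except ``time``.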
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the devices associated with the account."""
token = config.get(CONF_TOKEN)
username = config.get(CONF_USERNAME)
client = FoobotClient(
token, username, async_get_clientsession(hass), timeout=TIMEOUT
)
dev = []
try:
devices = await client.get_devices()
_LOGGER.debug("The following devices were found: %s", devices)
for device in devices:
foobot_data = FoobotData(client, device["uuid"])
for sensor_type in SENSOR_TYPES:
if sensor_type == "time":
continue
foobot_sensor = FoobotSensor(foobot_data, device, sensor_type)
dev.append(foobot_sensor)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
FoobotClient.TooManyRequests,
FoobotClient.InternalError,
):
_LOGGER.exception("Failed to connect to foobot servers.")
raise PlatformNotReady
except FoobotClient.ClientError:
_LOGGER.error("Failed to fetch data from foobot servers.")
return
async_add_entities(dev, True)
class FoobotSensor(Entity):
"""Implementation of a Foobot sensor."""
def __init__(self, data, device, sensor_type):
"""Initialize the sensor."""
self._uuid = device["uuid"]
self.foobot_data = data
self._name = "Foobot {} {}".format(device["name"], SENSOR_TYPES[sensor_type][0])
self.type = sensor_type
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return the state of the device."""
try:
data = self.foobot_data.data[self.type]
except (KeyError, TypeError):
data = None
return data
@property
def unique_id(self):
"""Return the unique id of this entity."""
return f"{self._uuid}_{self.type}"
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return self._unit_of_measurement
async def async_update(self):
"""Get the latest data."""
await self.foobot_data.async_update()
class FoobotData(Entity):
"""Get data from Foobot API."""
def __init__(self, client, uuid):
"""Initialize the data object."""
self._client = client
self._uuid = uuid
self.data = {}
@Throttle(SCAN_INTERVAL)
async def async_update(self):
"""Get the data from Foobot API."""
interval = SCAN_INTERVAL.total_seconds()
try:
response = await self._client.get_last_data(
self._uuid, interval, interval + 1
)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
self._client.TooManyRequests,
self._client.InternalError,
):
_LOGGER.debug("Couldn't fetch data")
return False
_LOGGER.debug("The data response is: %s", response)
self.data = {k: round(v, 1) for k, v in response[0].items()}
return True
| leppa/home-assistant | homeassistant/components/foobot/sensor.py | Python | apache-2.0 | 4,894 |
import six
from hamcrest.core.base_matcher import Matcher
from hamcrest.core.core.isequal import equal_to
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
import types
def wrap_matcher(x):
"""Wraps argument in a matcher, if necessary.
:returns: the argument as-is if it is already a matcher, otherwise wrapped
in an :py:func:`~hamcrest.core.core.isequal.equal_to` matcher.
"""
if isinstance(x, Matcher):
return x
else:
return equal_to(x)
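# Hedged doctest-style sketch:
#
#     wrap_matcher(5)            # plain value -> equal_to(5)
#     wrap_matcher(equal_to(5))  # already a Matcher -> returned unchanged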
def is_matchable_type(expected_type):
if isinstance(expected_type, type):
return True
if isinstance(expected_type, six.class_types):
return True
if isinstance(expected_type, tuple) and \
expected_type and \
all(map(is_matchable_type, expected_type)):
return True
return False
| axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/site-packages/hamcrest/core/helpers/wrap_matcher.py | Python | apache-2.0 | 880 |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import shutil
import unittest2 as unittest
from gppylib.db import dbconn
from gppylib.commands.base import Command
from gppylib.commands.gp import GpStart, GpStop
import tinctest
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
class transactions(MPPTestCase):
def test_skip_checkpoint_abort_transaction(self):
"""
@description FATAL failure execution handles already committed transactions properly
@created 2013-04-19 00:00:00
@modified 2013-04-19 00:00:00
@tags transaction checkpoint MPP-17817 MPP-17925 MPP-17926 MPP-17927 MPP-17928 schedule_transaction
@product_version gpdb: [4.1.2.5- main]
Repro steps:
1. GPDB is up and running, number of segments is irrelevant, no master standby is required,
no segment mirroring is required
2. inject fault on master for skipping checkpoints
> gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0
3. inject fault 'fatal' on master, it aborts already committed local transaction
> gpfaultinjector -p 4100 -m async -s 1 -f local_tm_record_transaction_commit -y panic_suppress
4. create table 'test'
> psql template1 -c 'create table test(a int);'
5. connect in utility mode to master and create table, insert rows into table and truncate table
> PGOPTIONS='-c gp_session_role=utility -c allow_system_table_mods=dml' psql -p 4100 template1
begin;
create table test21(a int);
insert into test21(a) values(10);
truncate table test21;
commit;
6. Wait 5 minutes
7. GPDB immediate shutdown and restart, GPDB does not come up with versions without fix,
GPDB comes up with versions with fix
> gpstop -air
"""
master_port = os.getenv("PGPORT", "5432")
cmd = Command(name="gpfaultinjector", cmdStr="gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0")
cmd.run()
cmd = Command(name="gpfaultinjector",
cmdStr="gpfaultinjector -p %s -m async -s 1 \
-f local_tm_record_transaction_commit -y panic_suppress" % master_port)
cmd.run()
PSQL.run_sql_command("create table mpp17817(a int)")
sql_file = local_path('mpp17817.sql')
PSQL.run_sql_file(sql_file, PGOPTIONS="-c gp_session_role=utility")
time.sleep(300)
cmd = Command(name="gpstop restart immediate",
cmdStr="source %s/greenplum_path.sh;\
gpstop -air" % os.environ["GPHOME"])
cmd.run(validateAfter=True)
# Cleanup
PSQL.run_sql_command("drop table mpp17817")
PSQL.run_sql_command("drop table mpp17817_21")
| CraigHarris/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/transaction_management/skip_checkpoint_abort_transaction/test_skip_checkpoint_abort_transaction.py | Python | apache-2.0 | 3,466 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""NEC plugin sharednet
Revision ID: 3b54bf9e29f7
Revises: 511471cc46b
Create Date: 2013-02-17 09:21:48.287134
"""
# revision identifiers, used by Alembic.
revision = '3b54bf9e29f7'
down_revision = '511471cc46b'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nec.nec_plugin.NECPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'ofctenantmappings',
sa.Column('ofc_id', sa.String(length=255), nullable=False),
sa.Column('quantum_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('quantum_id'),
sa.UniqueConstraint('ofc_id')
)
op.create_table(
'ofcnetworkmappings',
sa.Column('ofc_id', sa.String(length=255), nullable=False),
sa.Column('quantum_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('quantum_id'),
sa.UniqueConstraint('ofc_id')
)
op.create_table(
'ofcportmappings',
sa.Column('ofc_id', sa.String(length=255), nullable=False),
sa.Column('quantum_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('quantum_id'),
sa.UniqueConstraint('ofc_id')
)
op.create_table(
'ofcfiltermappings',
sa.Column('ofc_id', sa.String(length=255), nullable=False),
sa.Column('quantum_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('quantum_id'),
sa.UniqueConstraint('ofc_id')
)
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('ofcfiltermappings')
op.drop_table('ofcportmappings')
op.drop_table('ofcnetworkmappings')
op.drop_table('ofctenantmappings')
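# Hedged usage note: migrations such as this one are normally applied through the
# neutron-db-manage wrapper around alembic, e.g.
#
#     neutron-db-manage --config-file /etc/neutron/neutron.conf \
#                       --config-file /etc/neutron/plugin.ini upgrade 3b54bf9e29f7
#
# The config-file paths above are deployment-specific placeholders.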
| Brocade-OpenSource/OpenStack-DNRM-Neutron | neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py | Python | apache-2.0 | 2,645 |
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
def test_fileobj(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
out = FileLikeObject()
print('hello', 'world', file=out)
print('goodbye', 'world', file=out)
print()
""")
def test_sep(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', sep='-')
print()
""")
def test_end(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', end='-')
print()
""")
def test_flush(self):
self.assertCodeExecution("""
print('hello world', 'goodbye world', flush=True)
print()
""")
def test_combined(self):
self.assertCodeExecution("""
class FileLikeObject:
def __init__(self):
self.buffer = ''
def write(self, content):
self.buffer = self.buffer + (content * 2)
def flush(self):
self.buffer = self.buffer + '<<<'
out = FileLikeObject()
print('hello', 'world', sep='*', end='-', file=out, flush=True)
print('goodbye', 'world', file=out, sep='-', end='*')
print()
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["print"]
not_implemented = [
'test_class',
'test_frozenset',
'test_slice',
]
| pombredanne/voc | tests/builtins/test_print.py | Python | bsd-3-clause | 1,816 |
for astTuple in Query.input.tuples('ast'):
if type(astTuple.ast) is Field:
modifiers = astTuple.ast.modifiers
nonFinalPublic = modifiers.isSet(Modifier.ModifierFlag.Public) and not modifiers.isSet(Modifier.ModifierFlag.Final)
if not nonFinalPublic:
Query.input.remove(astTuple)
Query.result = Query.input | Vaishal-shah/Envision | InformationScripting/scripts/evaluation/nonFinalPublic.py | Python | bsd-3-clause | 345 |
"""
This module contains some assorted functions used in tests
"""
from __future__ import absolute_import
import os
from importlib import import_module
from twisted.trial.unittest import SkipTest
from scrapy.exceptions import NotConfigured
from scrapy.utils.boto import is_botocore
def assert_aws_environ():
    """Asserts the current environment is suitable for running AWS tests.
Raises SkipTest with the reason if it's not.
"""
skip_if_no_boto()
if 'AWS_ACCESS_KEY_ID' not in os.environ:
raise SkipTest("AWS keys not found")
def assert_gcs_environ():
if 'GCS_PROJECT_ID' not in os.environ:
raise SkipTest("GCS_PROJECT_ID not found")
def skip_if_no_boto():
try:
is_botocore()
except NotConfigured as e:
raise SkipTest(e)
def get_s3_content_and_delete(bucket, path, with_key=False):
""" Get content from s3 key, and delete key afterwards.
"""
if is_botocore():
import botocore.session
session = botocore.session.get_session()
client = session.create_client('s3')
key = client.get_object(Bucket=bucket, Key=path)
content = key['Body'].read()
client.delete_object(Bucket=bucket, Key=path)
else:
import boto
# assuming boto=2.2.2
bucket = boto.connect_s3().get_bucket(bucket, validate=False)
key = bucket.get_key(path)
content = key.get_contents_as_string()
bucket.delete_key(path)
return (content, key) if with_key else content
def get_gcs_content_and_delete(bucket, path):
from google.cloud import storage
client = storage.Client(project=os.environ.get('GCS_PROJECT_ID'))
bucket = client.get_bucket(bucket)
blob = bucket.get_blob(path)
content = blob.download_as_string()
bucket.delete_blob(path)
return content, blob
def get_crawler(spidercls=None, settings_dict=None):
"""Return an unconfigured Crawler object. If settings_dict is given, it
will be used to populate the crawler settings with a project level
priority.
"""
from scrapy.crawler import CrawlerRunner
from scrapy.spiders import Spider
runner = CrawlerRunner(settings_dict)
return runner.create_crawler(spidercls or Spider)
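# Hedged usage sketch (the setting shown is an arbitrary example):
#
#     crawler = get_crawler(settings_dict={'ROBOTSTXT_OBEY': False})
#     assert crawler.settings.getbool('ROBOTSTXT_OBEY') is False
#
# The returned crawler is unconfigured: nothing is started until crawl() is called.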
def get_pythonpath():
"""Return a PYTHONPATH suitable to use in processes so that they find this
installation of Scrapy"""
scrapy_path = import_module('scrapy').__path__[0]
return os.path.dirname(scrapy_path) + os.pathsep + os.environ.get('PYTHONPATH', '')
def get_testenv():
    """Return an OS environment dict suitable to fork processes that need to import
this installation of Scrapy, instead of a system installed one.
"""
env = os.environ.copy()
env['PYTHONPATH'] = get_pythonpath()
return env
def assert_samelines(testcase, text1, text2, msg=None):
"""Asserts text1 and text2 have the same lines, ignoring differences in
line endings between platforms
"""
testcase.assertEqual(text1.splitlines(), text2.splitlines(), msg)
| umrashrf/scrapy | scrapy/utils/test.py | Python | bsd-3-clause | 3,020 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for jni_generator.py.
This test suite contains various tests for the JNI generator.
It exercises the low-level parser all the way up to the
code generator and ensures the output matches a golden
file.
"""
import difflib
import inspect
import optparse
import os
import sys
import unittest
import jni_generator
from jni_generator import CalledByNative, JniParams, NativeMethod, Param
SCRIPT_NAME = 'base/android/jni_generator/jni_generator.py'
INCLUDES = (
'base/android/jni_generator/jni_generator_helper.h'
)
# Set this environment variable in order to regenerate the golden text
# files.
REBASELINE_ENV = 'REBASELINE'
class TestOptions(object):
"""The mock options object which is passed to the jni_generator.py script."""
def __init__(self):
self.namespace = None
self.script_name = SCRIPT_NAME
self.includes = INCLUDES
self.ptr_type = 'long'
self.cpp = 'cpp'
self.javap = 'javap'
self.native_exports = False
self.native_exports_optional = False
class TestGenerator(unittest.TestCase):
def assertObjEquals(self, first, second):
dict_first = first.__dict__
dict_second = second.__dict__
self.assertEquals(dict_first.keys(), dict_second.keys())
for key, value in dict_first.iteritems():
if (type(value) is list and len(value) and
isinstance(type(value[0]), object)):
self.assertListEquals(value, second.__getattribute__(key))
else:
actual = second.__getattribute__(key)
self.assertEquals(value, actual,
'Key ' + key + ': ' + str(value) + '!=' + str(actual))
def assertListEquals(self, first, second):
self.assertEquals(len(first), len(second))
for i in xrange(len(first)):
if isinstance(first[i], object):
self.assertObjEquals(first[i], second[i])
else:
self.assertEquals(first[i], second[i])
def assertTextEquals(self, golden_text, generated_text):
if not self.compareText(golden_text, generated_text):
self.fail('Golden text mismatch.')
def compareText(self, golden_text, generated_text):
def FilterText(text):
return [
l.strip() for l in text.split('\n')
if not l.startswith('// Copyright')
]
stripped_golden = FilterText(golden_text)
stripped_generated = FilterText(generated_text)
if stripped_golden == stripped_generated:
return True
print self.id()
for line in difflib.context_diff(stripped_golden, stripped_generated):
print line
print '\n\nGenerated'
print '=' * 80
print generated_text
print '=' * 80
print 'Run with:'
print 'REBASELINE=1', sys.argv[0]
print 'to regenerate the data files.'
def _ReadGoldenFile(self, golden_file):
if not os.path.exists(golden_file):
return None
with file(golden_file, 'r') as f:
return f.read()
def assertGoldenTextEquals(self, generated_text):
script_dir = os.path.dirname(sys.argv[0])
# This is the caller test method.
caller = inspect.stack()[1][3]
self.assertTrue(caller.startswith('test'),
'assertGoldenTextEquals can only be called from a '
'test* method, not %s' % caller)
golden_file = os.path.join(script_dir, caller + '.golden')
golden_text = self._ReadGoldenFile(golden_file)
if os.environ.get(REBASELINE_ENV):
if golden_text != generated_text:
with file(golden_file, 'w') as f:
f.write(generated_text)
return
self.assertTextEquals(golden_text, generated_text)
def testInspectCaller(self):
def willRaise():
# This function can only be called from a test* method.
self.assertGoldenTextEquals('')
self.assertRaises(AssertionError, willRaise)
def testNatives(self):
test_data = """"
interface OnFrameAvailableListener {}
private native int nativeInit();
private native void nativeDestroy(int nativeChromeBrowserProvider);
private native long nativeAddBookmark(
int nativeChromeBrowserProvider,
String url, String title, boolean isFolder, long parentId);
private static native String nativeGetDomainAndRegistry(String url);
private static native void nativeCreateHistoricalTabFromState(
byte[] state, int tab_index);
private native byte[] nativeGetStateAsByteArray(View view);
private static native String[] nativeGetAutofillProfileGUIDs();
private native void nativeSetRecognitionResults(
int sessionId, String[] results);
private native long nativeAddBookmarkFromAPI(
int nativeChromeBrowserProvider,
String url, Long created, Boolean isBookmark,
Long date, byte[] favicon, String title, Integer visits);
native int nativeFindAll(String find);
private static native OnFrameAvailableListener nativeGetInnerClass();
private native Bitmap nativeQueryBitmap(
int nativeChromeBrowserProvider,
String[] projection, String selection,
String[] selectionArgs, String sortOrder);
private native void nativeGotOrientation(
int nativeDataFetcherImplAndroid,
double alpha, double beta, double gamma);
private static native Throwable nativeMessWithJavaException(Throwable e);
"""
jni_generator.JniParams.SetFullyQualifiedClass(
'org/chromium/example/jni_generator/SampleForTests')
jni_generator.JniParams.ExtractImportsAndInnerClasses(test_data)
natives = jni_generator.ExtractNatives(test_data, 'int')
golden_natives = [
NativeMethod(return_type='int', static=False,
name='Init',
params=[],
java_class_name=None,
type='function'),
NativeMethod(return_type='void', static=False, name='Destroy',
params=[Param(datatype='int',
name='nativeChromeBrowserProvider')],
java_class_name=None,
type='method',
p0_type='ChromeBrowserProvider'),
NativeMethod(return_type='long', static=False, name='AddBookmark',
params=[Param(datatype='int',
name='nativeChromeBrowserProvider'),
Param(datatype='String',
name='url'),
Param(datatype='String',
name='title'),
Param(datatype='boolean',
name='isFolder'),
Param(datatype='long',
name='parentId')],
java_class_name=None,
type='method',
p0_type='ChromeBrowserProvider'),
NativeMethod(return_type='String', static=True,
name='GetDomainAndRegistry',
params=[Param(datatype='String',
name='url')],
java_class_name=None,
type='function'),
NativeMethod(return_type='void', static=True,
name='CreateHistoricalTabFromState',
params=[Param(datatype='byte[]',
name='state'),
Param(datatype='int',
name='tab_index')],
java_class_name=None,
type='function'),
NativeMethod(return_type='byte[]', static=False,
name='GetStateAsByteArray',
params=[Param(datatype='View', name='view')],
java_class_name=None,
type='function'),
NativeMethod(return_type='String[]', static=True,
name='GetAutofillProfileGUIDs', params=[],
java_class_name=None,
type='function'),
NativeMethod(return_type='void', static=False,
name='SetRecognitionResults',
params=[Param(datatype='int', name='sessionId'),
Param(datatype='String[]', name='results')],
java_class_name=None,
type='function'),
NativeMethod(return_type='long', static=False,
name='AddBookmarkFromAPI',
params=[Param(datatype='int',
name='nativeChromeBrowserProvider'),
Param(datatype='String',
name='url'),
Param(datatype='Long',
name='created'),
Param(datatype='Boolean',
name='isBookmark'),
Param(datatype='Long',
name='date'),
Param(datatype='byte[]',
name='favicon'),
Param(datatype='String',
name='title'),
Param(datatype='Integer',
name='visits')],
java_class_name=None,
type='method',
p0_type='ChromeBrowserProvider'),
NativeMethod(return_type='int', static=False,
name='FindAll',
params=[Param(datatype='String',
name='find')],
java_class_name=None,
type='function'),
NativeMethod(return_type='OnFrameAvailableListener', static=True,
name='GetInnerClass',
params=[],
java_class_name=None,
type='function'),
NativeMethod(return_type='Bitmap',
static=False,
name='QueryBitmap',
params=[Param(datatype='int',
name='nativeChromeBrowserProvider'),
Param(datatype='String[]',
name='projection'),
Param(datatype='String',
name='selection'),
Param(datatype='String[]',
name='selectionArgs'),
Param(datatype='String',
name='sortOrder'),
],
java_class_name=None,
type='method',
p0_type='ChromeBrowserProvider'),
NativeMethod(return_type='void', static=False,
name='GotOrientation',
params=[Param(datatype='int',
name='nativeDataFetcherImplAndroid'),
Param(datatype='double',
name='alpha'),
Param(datatype='double',
name='beta'),
Param(datatype='double',
name='gamma'),
],
java_class_name=None,
type='method',
p0_type='content::DataFetcherImplAndroid'),
NativeMethod(return_type='Throwable', static=True,
name='MessWithJavaException',
params=[Param(datatype='Throwable', name='e')],
java_class_name=None,
type='function')
]
self.assertListEquals(golden_natives, natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], TestOptions())
self.assertGoldenTextEquals(h.GetContent())
def testInnerClassNatives(self):
test_data = """
class MyInnerClass {
@NativeCall("MyInnerClass")
private native int nativeInit();
}
"""
natives = jni_generator.ExtractNatives(test_data, 'int')
golden_natives = [
NativeMethod(return_type='int', static=False,
name='Init', params=[],
java_class_name='MyInnerClass',
type='function')
]
self.assertListEquals(golden_natives, natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], TestOptions())
self.assertGoldenTextEquals(h.GetContent())
def testInnerClassNativesMultiple(self):
test_data = """
class MyInnerClass {
@NativeCall("MyInnerClass")
private native int nativeInit();
}
class MyOtherInnerClass {
@NativeCall("MyOtherInnerClass")
private native int nativeInit();
}
"""
natives = jni_generator.ExtractNatives(test_data, 'int')
golden_natives = [
NativeMethod(return_type='int', static=False,
name='Init', params=[],
java_class_name='MyInnerClass',
type='function'),
NativeMethod(return_type='int', static=False,
name='Init', params=[],
java_class_name='MyOtherInnerClass',
type='function')
]
self.assertListEquals(golden_natives, natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], TestOptions())
self.assertGoldenTextEquals(h.GetContent())
def testInnerClassNativesBothInnerAndOuter(self):
test_data = """
class MyOuterClass {
private native int nativeInit();
class MyOtherInnerClass {
@NativeCall("MyOtherInnerClass")
private native int nativeInit();
}
}
"""
natives = jni_generator.ExtractNatives(test_data, 'int')
golden_natives = [
NativeMethod(return_type='int', static=False,
name='Init', params=[],
java_class_name=None,
type='function'),
NativeMethod(return_type='int', static=False,
name='Init', params=[],
java_class_name='MyOtherInnerClass',
type='function')
]
self.assertListEquals(golden_natives, natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], TestOptions())
self.assertGoldenTextEquals(h.GetContent())
def testCalledByNatives(self):
test_data = """"
import android.graphics.Bitmap;
import android.view.View;
import java.io.InputStream;
import java.util.List;
class InnerClass {}
@CalledByNative
InnerClass showConfirmInfoBar(int nativeInfoBar,
String buttonOk, String buttonCancel, String title, Bitmap icon) {
InfoBar infobar = new ConfirmInfoBar(nativeInfoBar, mContext,
buttonOk, buttonCancel,
title, icon);
return infobar;
}
@CalledByNative
InnerClass showAutoLoginInfoBar(int nativeInfoBar,
String realm, String account, String args) {
AutoLoginInfoBar infobar = new AutoLoginInfoBar(nativeInfoBar, mContext,
realm, account, args);
if (infobar.displayedAccountCount() == 0)
infobar = null;
return infobar;
}
@CalledByNative("InfoBar")
void dismiss();
@SuppressWarnings("unused")
@CalledByNative
private static boolean shouldShowAutoLogin(View view,
String realm, String account, String args) {
AccountManagerContainer accountManagerContainer =
new AccountManagerContainer((Activity)contentView.getContext(),
realm, account, args);
String[] logins = accountManagerContainer.getAccountLogins(null);
return logins.length != 0;
}
@CalledByNative
static InputStream openUrl(String url) {
return null;
}
@CalledByNative
private void activateHardwareAcceleration(final boolean activated,
final int iPid, final int iType,
final int iPrimaryID, final int iSecondaryID) {
if (!activated) {
return
}
}
@CalledByNativeUnchecked
private void uncheckedCall(int iParam);
@CalledByNative
public byte[] returnByteArray();
@CalledByNative
public boolean[] returnBooleanArray();
@CalledByNative
public char[] returnCharArray();
@CalledByNative
public short[] returnShortArray();
@CalledByNative
public int[] returnIntArray();
@CalledByNative
public long[] returnLongArray();
@CalledByNative
public double[] returnDoubleArray();
@CalledByNative
public Object[] returnObjectArray();
@CalledByNative
public byte[][] returnArrayOfByteArray();
@CalledByNative
public Bitmap.CompressFormat getCompressFormat();
@CalledByNative
public List<Bitmap.CompressFormat> getCompressFormatList();
"""
jni_generator.JniParams.SetFullyQualifiedClass('org/chromium/Foo')
jni_generator.JniParams.ExtractImportsAndInnerClasses(test_data)
called_by_natives = jni_generator.ExtractCalledByNatives(test_data)
golden_called_by_natives = [
CalledByNative(
return_type='InnerClass',
system_class=False,
static=False,
name='showConfirmInfoBar',
method_id_var_name='showConfirmInfoBar',
java_class_name='',
params=[Param(datatype='int', name='nativeInfoBar'),
Param(datatype='String', name='buttonOk'),
Param(datatype='String', name='buttonCancel'),
Param(datatype='String', name='title'),
Param(datatype='Bitmap', name='icon')],
env_call=('Object', ''),
unchecked=False,
),
CalledByNative(
return_type='InnerClass',
system_class=False,
static=False,
name='showAutoLoginInfoBar',
method_id_var_name='showAutoLoginInfoBar',
java_class_name='',
params=[Param(datatype='int', name='nativeInfoBar'),
Param(datatype='String', name='realm'),
Param(datatype='String', name='account'),
Param(datatype='String', name='args')],
env_call=('Object', ''),
unchecked=False,
),
CalledByNative(
return_type='void',
system_class=False,
static=False,
name='dismiss',
method_id_var_name='dismiss',
java_class_name='InfoBar',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='boolean',
system_class=False,
static=True,
name='shouldShowAutoLogin',
method_id_var_name='shouldShowAutoLogin',
java_class_name='',
params=[Param(datatype='View', name='view'),
Param(datatype='String', name='realm'),
Param(datatype='String', name='account'),
Param(datatype='String', name='args')],
env_call=('Boolean', ''),
unchecked=False,
),
CalledByNative(
return_type='InputStream',
system_class=False,
static=True,
name='openUrl',
method_id_var_name='openUrl',
java_class_name='',
params=[Param(datatype='String', name='url')],
env_call=('Object', ''),
unchecked=False,
),
CalledByNative(
return_type='void',
system_class=False,
static=False,
name='activateHardwareAcceleration',
method_id_var_name='activateHardwareAcceleration',
java_class_name='',
params=[Param(datatype='boolean', name='activated'),
Param(datatype='int', name='iPid'),
Param(datatype='int', name='iType'),
Param(datatype='int', name='iPrimaryID'),
Param(datatype='int', name='iSecondaryID'),
],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='void',
system_class=False,
static=False,
name='uncheckedCall',
method_id_var_name='uncheckedCall',
java_class_name='',
params=[Param(datatype='int', name='iParam')],
env_call=('Void', ''),
unchecked=True,
),
CalledByNative(
return_type='byte[]',
system_class=False,
static=False,
name='returnByteArray',
method_id_var_name='returnByteArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='boolean[]',
system_class=False,
static=False,
name='returnBooleanArray',
method_id_var_name='returnBooleanArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='char[]',
system_class=False,
static=False,
name='returnCharArray',
method_id_var_name='returnCharArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='short[]',
system_class=False,
static=False,
name='returnShortArray',
method_id_var_name='returnShortArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='int[]',
system_class=False,
static=False,
name='returnIntArray',
method_id_var_name='returnIntArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='long[]',
system_class=False,
static=False,
name='returnLongArray',
method_id_var_name='returnLongArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='double[]',
system_class=False,
static=False,
name='returnDoubleArray',
method_id_var_name='returnDoubleArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='Object[]',
system_class=False,
static=False,
name='returnObjectArray',
method_id_var_name='returnObjectArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='byte[][]',
system_class=False,
static=False,
name='returnArrayOfByteArray',
method_id_var_name='returnArrayOfByteArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='Bitmap.CompressFormat',
system_class=False,
static=False,
name='getCompressFormat',
method_id_var_name='getCompressFormat',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='List<Bitmap.CompressFormat>',
system_class=False,
static=False,
name='getCompressFormatList',
method_id_var_name='getCompressFormatList',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
]
self.assertListEquals(golden_called_by_natives, called_by_natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
[], called_by_natives, [],
TestOptions())
self.assertGoldenTextEquals(h.GetContent())
def testCalledByNativeParseError(self):
try:
jni_generator.ExtractCalledByNatives("""
@CalledByNative
public static int foo(); // This one is fine
@CalledByNative
scooby doo
""")
self.fail('Expected a ParseError')
except jni_generator.ParseError, e:
self.assertEquals(('@CalledByNative', 'scooby doo'), e.context_lines)
def testFullyQualifiedClassName(self):
contents = """
// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.content.browser;
import org.chromium.base.BuildInfo;
"""
self.assertEquals('org/chromium/content/browser/Foo',
jni_generator.ExtractFullyQualifiedJavaClassName(
'org/chromium/content/browser/Foo.java', contents))
self.assertEquals('org/chromium/content/browser/Foo',
jni_generator.ExtractFullyQualifiedJavaClassName(
'frameworks/Foo.java', contents))
self.assertRaises(SyntaxError,
jni_generator.ExtractFullyQualifiedJavaClassName,
'com/foo/Bar', 'no PACKAGE line')
def testMethodNameMangling(self):
self.assertEquals('closeV',
jni_generator.GetMangledMethodName('close', [], 'void'))
self.assertEquals('readI_AB_I_I',
jni_generator.GetMangledMethodName('read',
[Param(name='p1',
datatype='byte[]'),
Param(name='p2',
datatype='int'),
Param(name='p3',
datatype='int'),],
'int'))
self.assertEquals('openJIIS_JLS',
jni_generator.GetMangledMethodName('open',
[Param(name='p1',
datatype='java/lang/String'),],
'java/io/InputStream'))
def testFromJavaPGenerics(self):
contents = """
public abstract class java.util.HashSet<T> extends java.util.AbstractSet<E>
implements java.util.Set<E>, java.lang.Cloneable, java.io.Serializable {
public void dummy();
Signature: ()V
}
"""
jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'),
TestOptions())
self.assertEquals(1, len(jni_from_javap.called_by_natives))
self.assertGoldenTextEquals(jni_from_javap.GetContent())
def testSnippnetJavap6_7_8(self):
content_javap6 = """
public class java.util.HashSet {
public boolean add(java.lang.Object);
Signature: (Ljava/lang/Object;)Z
}
"""
content_javap7 = """
public class java.util.HashSet {
public boolean add(E);
Signature: (Ljava/lang/Object;)Z
}
"""
content_javap8 = """
public class java.util.HashSet {
public boolean add(E);
descriptor: (Ljava/lang/Object;)Z
}
"""
jni_from_javap6 = jni_generator.JNIFromJavaP(content_javap6.split('\n'),
TestOptions())
jni_from_javap7 = jni_generator.JNIFromJavaP(content_javap7.split('\n'),
TestOptions())
jni_from_javap8 = jni_generator.JNIFromJavaP(content_javap8.split('\n'),
TestOptions())
self.assertTrue(jni_from_javap6.GetContent())
self.assertTrue(jni_from_javap7.GetContent())
self.assertTrue(jni_from_javap8.GetContent())
# Ensure the javap7 is correctly parsed and uses the Signature field rather
# than the "E" parameter.
self.assertTextEquals(jni_from_javap6.GetContent(),
jni_from_javap7.GetContent())
# Ensure the javap8 is correctly parsed and uses the descriptor field.
self.assertTextEquals(jni_from_javap7.GetContent(),
jni_from_javap8.GetContent())
def testFromJavaP(self):
contents = self._ReadGoldenFile(os.path.join(os.path.dirname(sys.argv[0]),
'testInputStream.javap'))
jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'),
TestOptions())
self.assertEquals(10, len(jni_from_javap.called_by_natives))
self.assertGoldenTextEquals(jni_from_javap.GetContent())
def testConstantsFromJavaP(self):
for f in ['testMotionEvent.javap', 'testMotionEvent.javap7']:
contents = self._ReadGoldenFile(os.path.join(os.path.dirname(sys.argv[0]),
f))
jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'),
TestOptions())
self.assertEquals(86, len(jni_from_javap.called_by_natives))
self.assertGoldenTextEquals(jni_from_javap.GetContent())
def testREForNatives(self):
# We should not match "native SyncSetupFlow" inside the comment.
test_data = """
/**
* Invoked when the setup process is complete so we can disconnect from the
* native-side SyncSetupFlowHandler.
*/
public void destroy() {
Log.v(TAG, "Destroying native SyncSetupFlow");
if (mNativeSyncSetupFlow != 0) {
nativeSyncSetupEnded(mNativeSyncSetupFlow);
mNativeSyncSetupFlow = 0;
}
}
private native void nativeSyncSetupEnded(
int nativeAndroidSyncSetupFlowHandler);
"""
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'foo/bar', TestOptions())
def testRaisesOnNonJNIMethod(self):
test_data = """
class MyInnerClass {
private int Foo(int p0) {
}
}
"""
self.assertRaises(SyntaxError,
jni_generator.JNIFromJavaSource,
test_data, 'foo/bar', TestOptions())
def testJniSelfDocumentingExample(self):
script_dir = os.path.dirname(sys.argv[0])
content = file(os.path.join(script_dir,
'java/src/org/chromium/example/jni_generator/SampleForTests.java')
).read()
golden_file = os.path.join(script_dir, 'golden_sample_for_tests_jni.h')
golden_content = file(golden_file).read()
jni_from_java = jni_generator.JNIFromJavaSource(
content, 'org/chromium/example/jni_generator/SampleForTests',
TestOptions())
generated_text = jni_from_java.GetContent()
if not self.compareText(golden_content, generated_text):
if os.environ.get(REBASELINE_ENV):
with file(golden_file, 'w') as f:
f.write(generated_text)
return
self.fail('testJniSelfDocumentingExample')
def testNoWrappingPreprocessorLines(self):
test_data = """
package com.google.lookhowextremelylongiam.snarf.icankeepthisupallday;
class ReallyLongClassNamesAreAllTheRage {
private static native int nativeTest();
}
"""
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, ('com/google/lookhowextremelylongiam/snarf/'
'icankeepthisupallday/ReallyLongClassNamesAreAllTheRage'),
TestOptions())
jni_lines = jni_from_java.GetContent().split('\n')
line = filter(lambda line: line.lstrip().startswith('#ifndef'),
jni_lines)[0]
self.assertTrue(len(line) > 80,
('Expected #ifndef line to be > 80 chars: ', line))
def testImports(self):
import_header = """
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.content.app;
import android.app.Service;
import android.content.Context;
import android.content.Intent;
import android.graphics.SurfaceTexture;
import android.os.Bundle;
import android.os.IBinder;
import android.os.ParcelFileDescriptor;
import android.os.Process;
import android.os.RemoteException;
import android.util.Log;
import android.view.Surface;
import java.util.ArrayList;
import org.chromium.base.annotations.CalledByNative;
import org.chromium.base.annotations.JNINamespace;
import org.chromium.content.app.ContentMain;
import org.chromium.content.browser.SandboxedProcessConnection;
import org.chromium.content.common.ISandboxedProcessCallback;
import org.chromium.content.common.ISandboxedProcessService;
import org.chromium.content.common.WillNotRaise.AnException;
import org.chromium.content.common.WillRaise.AnException;
import static org.chromium.Bar.Zoo;
class Foo {
public static class BookmarkNode implements Parcelable {
}
public interface PasswordListObserver {
}
}
"""
jni_generator.JniParams.SetFullyQualifiedClass(
'org/chromium/content/app/Foo')
jni_generator.JniParams.ExtractImportsAndInnerClasses(import_header)
self.assertTrue('Lorg/chromium/content/common/ISandboxedProcessService' in
jni_generator.JniParams._imports)
self.assertTrue('Lorg/chromium/Bar/Zoo' in
jni_generator.JniParams._imports)
self.assertTrue('Lorg/chromium/content/app/Foo$BookmarkNode' in
jni_generator.JniParams._inner_classes)
self.assertTrue('Lorg/chromium/content/app/Foo$PasswordListObserver' in
jni_generator.JniParams._inner_classes)
self.assertEquals('Lorg/chromium/content/app/ContentMain$Inner;',
jni_generator.JniParams.JavaToJni('ContentMain.Inner'))
self.assertRaises(SyntaxError,
jni_generator.JniParams.JavaToJni,
'AnException')
def testJniParamsJavaToJni(self):
self.assertTextEquals('I', JniParams.JavaToJni('int'))
self.assertTextEquals('[B', JniParams.JavaToJni('byte[]'))
self.assertTextEquals(
'[Ljava/nio/ByteBuffer;', JniParams.JavaToJni('java/nio/ByteBuffer[]'))
def testNativesLong(self):
test_options = TestOptions()
test_options.ptr_type = 'long'
test_data = """"
private native void nativeDestroy(long nativeChromeBrowserProvider);
"""
jni_generator.JniParams.ExtractImportsAndInnerClasses(test_data)
natives = jni_generator.ExtractNatives(test_data, test_options.ptr_type)
golden_natives = [
NativeMethod(return_type='void', static=False, name='Destroy',
params=[Param(datatype='long',
name='nativeChromeBrowserProvider')],
java_class_name=None,
type='method',
p0_type='ChromeBrowserProvider',
ptr_type=test_options.ptr_type),
]
self.assertListEquals(golden_natives, natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], test_options)
self.assertGoldenTextEquals(h.GetContent())
def runNativeExportsOption(self, optional):
test_data = """
package org.chromium.example.jni_generator;
/** The pointer to the native Test. */
long nativeTest;
class Test {
private static native int nativeStaticMethod(long nativeTest, int arg1);
private native int nativeMethod(long nativeTest, int arg1);
@CalledByNative
private void testMethodWithParam(int iParam);
@CalledByNative
private String testMethodWithParamAndReturn(int iParam);
@CalledByNative
private static int testStaticMethodWithParam(int iParam);
@CalledByNative
private static double testMethodWithNoParam();
@CalledByNative
private static String testStaticMethodWithNoParam();
class MyInnerClass {
@NativeCall("MyInnerClass")
private native int nativeInit();
}
class MyOtherInnerClass {
@NativeCall("MyOtherInnerClass")
private native int nativeInit();
}
}
"""
options = TestOptions()
options.native_exports = True
options.native_exports_optional = optional
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/example/jni_generator/SampleForTests', options)
return jni_from_java.GetContent()
def testNativeExportsOption(self):
content = self.runNativeExportsOption(False)
self.assertGoldenTextEquals(content)
def testNativeExportsOptionalOption(self):
content = self.runNativeExportsOption(True)
self.assertGoldenTextEquals(content)
def testOuterInnerRaises(self):
test_data = """
package org.chromium.media;
@CalledByNative
static int getCaptureFormatWidth(VideoCapture.CaptureFormat format) {
return format.getWidth();
}
"""
def willRaise():
jni_generator.JNIFromJavaSource(
test_data,
'org/chromium/media/VideoCaptureFactory',
TestOptions())
self.assertRaises(SyntaxError, willRaise)
def testSingleJNIAdditionalImport(self):
test_data = """
package org.chromium.foo;
@JNIAdditionalImport(Bar.class)
class Foo {
@CalledByNative
private static void calledByNative(Bar.Callback callback) {
}
private static native void nativeDoSomething(Bar.Callback callback);
}
"""
jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo',
TestOptions())
self.assertGoldenTextEquals(jni_from_java.GetContent())
def testMultipleJNIAdditionalImport(self):
test_data = """
package org.chromium.foo;
@JNIAdditionalImport({Bar1.class, Bar2.class})
class Foo {
@CalledByNative
private static void calledByNative(Bar1.Callback callback1,
Bar2.Callback callback2) {
}
private static native void nativeDoSomething(Bar1.Callback callback1,
Bar2.Callback callback2);
}
"""
jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo',
TestOptions())
self.assertGoldenTextEquals(jni_from_java.GetContent())
def TouchStamp(stamp_path):
dir_name = os.path.dirname(stamp_path)
if not os.path.isdir(dir_name):
    os.makedirs(dir_name)
with open(stamp_path, 'a'):
os.utime(stamp_path, None)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args(argv[1:])
test_result = unittest.main(argv=argv[0:1], exit=False)
if test_result.result.wasSuccessful() and options.stamp:
TouchStamp(options.stamp)
return not test_result.result.wasSuccessful()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| heke123/chromium-crosswalk | base/android/jni_generator/jni_generator_tests.py | Python | bsd-3-clause | 39,812 |
import json
import mock
from sentry.plugins.helpers import get_option, set_option
from sentry.testutils import TestCase
from sentry.models import set_sentry_version, Option
from sentry.tasks.check_update import check_update, PYPI_URL
class CheckUpdateTest(TestCase):
OLD = '5.0.0'
CURRENT = '5.5.0-DEV'
NEW = '1000000000.5.1'
KEY = 'sentry:latest_version'
def test_run_check_update_task(self):
with mock.patch('sentry.tasks.check_update.fetch_url_content') as fetch:
fetch.return_value = (
None, None, json.dumps({'info': {'version': self.NEW}})
)
check_update() # latest_version > current_version
fetch.assert_called_once_with(PYPI_URL)
self.assertEqual(get_option(key=self.KEY), self.NEW)
def test_run_check_update_task_with_bad_response(self):
with mock.patch('sentry.tasks.check_update.fetch_url_content') as fetch:
fetch.return_value = (None, None, '')
check_update() # latest_version == current_version
fetch.assert_called_once_with(PYPI_URL)
self.assertEqual(get_option(key=self.KEY), None)
def test_set_sentry_version_empty_latest(self):
set_sentry_version(latest=self.NEW)
self.assertEqual(get_option(key=self.KEY), self.NEW)
def test_set_sentry_version_new(self):
set_option(self.KEY, self.OLD)
with mock.patch('sentry.get_version') as get_version:
get_version.return_value = self.CURRENT
set_sentry_version(latest=self.NEW)
self.assertEqual(Option.objects.get_value(key=self.KEY), self.NEW)
def test_set_sentry_version_old(self):
set_option(self.KEY, self.NEW)
with mock.patch('sentry.get_version') as get_version:
get_version.return_value = self.CURRENT
set_sentry_version(latest=self.OLD)
self.assertEqual(Option.objects.get_value(key=self.KEY), self.NEW)
| beni55/sentry | tests/sentry/tasks/check_update/tests.py | Python | bsd-3-clause | 1,970 |
"""
Tests for structural time series models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import warnings
from statsmodels.datasets import macrodata
from statsmodels.tsa.statespace import structural
from statsmodels.tsa.statespace.structural import UnobservedComponents
from .results import results_structural
from statsmodels.tools import add_constant
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
dta = macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-07-01', freq='QS')
def run_ucm(name):
true = getattr(results_structural, name)
for model in true['models']:
kwargs = model.copy()
kwargs.update(true['kwargs'])
# Make a copy of the data
values = dta.copy()
freq = kwargs.pop('freq', None)
if freq is not None:
values.index = pd.date_range(start='1959-01-01', periods=len(dta),
freq=freq)
# Test pandas exog
if 'exog' in kwargs:
# Default value here is pd.Series object
exog = np.log(values['realgdp'])
# Also allow a check with a 1-dim numpy array
if kwargs['exog'] == 'numpy':
exog = exog.values.squeeze()
kwargs['exog'] = exog
# Create the model
mod = UnobservedComponents(values['unemp'], **kwargs)
# Smoke test for starting parameters, untransform, transform
# Also test that transform and untransform are inverses
mod.start_params
assert_allclose(mod.start_params, mod.transform_params(mod.untransform_params(mod.start_params)))
# Fit the model at the true parameters
res_true = mod.filter(true['params'])
# Check that the cycle bounds were computed correctly
freqstr = freq[0] if freq is not None else values.index.freqstr[0]
if freqstr == 'A':
cycle_period_bounds = (1.5, 12)
elif freqstr == 'Q':
cycle_period_bounds = (1.5*4, 12*4)
elif freqstr == 'M':
cycle_period_bounds = (1.5*12, 12*12)
else:
# If we have no information on data frequency, require the
# cycle frequency to be between 0 and pi
cycle_period_bounds = (2, np.inf)
# Test that the cycle frequency bound is correct
assert_equal(mod.cycle_frequency_bound,
(2*np.pi / cycle_period_bounds[1],
2*np.pi / cycle_period_bounds[0])
)
# Test that the likelihood is correct
rtol = true.get('rtol', 1e-7)
atol = true.get('atol', 0)
assert_allclose(res_true.llf, true['llf'], rtol=rtol, atol=atol)
# Smoke test for plot_components
if have_matplotlib:
fig = res_true.plot_components()
plt.close(fig)
# Now fit the model via MLE
with warnings.catch_warnings(record=True) as w:
res = mod.fit(disp=-1)
# If we found a higher likelihood, no problem; otherwise check
# that we're very close to that found by R
if res.llf <= true['llf']:
assert_allclose(res.llf, true['llf'], rtol=1e-4)
# Smoke test for summary
res.summary()
def test_irregular():
run_ucm('irregular')
def test_fixed_intercept():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_intercept')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_constant():
run_ucm('deterministic_constant')
def test_random_walk():
run_ucm('random_walk')
def test_local_level():
run_ucm('local_level')
def test_fixed_slope():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_slope')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_trend():
run_ucm('deterministic_trend')
def test_random_walk_with_drift():
run_ucm('random_walk_with_drift')
def test_local_linear_deterministic_trend():
run_ucm('local_linear_deterministic_trend')
def test_local_linear_trend():
run_ucm('local_linear_trend')
def test_smooth_trend():
run_ucm('smooth_trend')
def test_random_trend():
run_ucm('random_trend')
def test_cycle():
run_ucm('cycle')
def test_seasonal():
run_ucm('seasonal')
def test_reg():
run_ucm('reg')
def test_rtrend_ar1():
run_ucm('rtrend_ar1')
def test_lltrend_cycle_seasonal_reg_ar1():
run_ucm('lltrend_cycle_seasonal_reg_ar1')
def test_mle_reg():
endog = np.arange(100)*1.0
exog = endog*2
# Make the fit not-quite-perfect
endog[::2] += 0.01
endog[1::2] -= 0.01
with warnings.catch_warnings(record=True) as w:
mod1 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=False)
res1 = mod1.fit(disp=-1)
mod2 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=True)
res2 = mod2.fit(disp=-1)
assert_allclose(res1.regression_coefficients.filtered[0, -1], 0.5, atol=1e-5)
assert_allclose(res2.params[1], 0.5, atol=1e-5)
def test_specifications():
endog = [1, 2]
# Test that when nothing specified, a warning is issued and the model that
# is fit is one with irregular=True and nothing else.
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog)
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'irregular')
# Test an invalid string trend specification
assert_raises(ValueError, UnobservedComponents, endog, 'invalid spec')
# Test that if a trend component is specified without a level component,
# a warning is issued and a deterministic level component is added
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog, trend=True, irregular=True)
message = ("Trend component specified without level component;"
" deterministic level component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'deterministic trend')
# Test that if a string specification is provided, a warning is issued if
# the boolean attributes are also specified
trend_attributes = ['irregular', 'trend', 'stochastic_level',
'stochastic_trend']
for attribute in trend_attributes:
with warnings.catch_warnings(record=True) as w:
kwargs = {attribute: True}
mod = UnobservedComponents(endog, 'deterministic trend', **kwargs)
message = ("Value of `%s` may be overridden when the trend"
" component is specified using a model string."
% attribute)
assert_equal(str(w[0].message), message)
# Test that a seasonal with period less than two is invalid
assert_raises(ValueError, UnobservedComponents, endog, seasonal=1)
def test_start_params():
# Test that the behavior is correct for multiple exogenous and / or
# autoregressive components
# Parameters
nobs = int(1e4)
beta = np.r_[10, -2]
phi = np.r_[0.5, 0.1]
# Generate data
np.random.seed(1234)
exog = np.c_[np.ones(nobs), np.arange(nobs)*1.0]
eps = np.random.normal(size=nobs)
endog = np.zeros(nobs+2)
for t in range(1, nobs):
endog[t+1] = phi[0] * endog[t] + phi[1] * endog[t-1] + eps[t]
endog = endog[2:]
endog += np.dot(exog, beta)
# Now just test that the starting parameters are approximately what they
# ought to be (could make this arbitrarily precise by increasing nobs,
# but that would slow down the test for no real gain)
mod = UnobservedComponents(endog, exog=exog, autoregressive=2)
assert_allclose(mod.start_params, [1., 0.5, 0.1, 10, -2], atol=1e-1)
def test_forecast():
endog = np.arange(50) + 10
exog = np.arange(50)
mod = UnobservedComponents(endog, exog=exog, level='dconstant')
res = mod.smooth([1e-15, 1])
actual = res.forecast(10, exog=np.arange(50,60)[:,np.newaxis])
desired = np.arange(50,60) + 10
assert_allclose(actual, desired)
| saketkc/statsmodels | statsmodels/tsa/statespace/tests/test_structural.py | Python | bsd-3-clause | 9,003 |
#!/usr/bin/python
# encoding: utf-8
# Jan 2011 (markus kossner) Cleaned up the code, added some documentation
# somewhere around Aug 2008 (markus kossner) created
#
# This script extracts the molecular framework for a database of molecules.
# You can use two modes (hard coded):
# - Scaff: The molecular frame is extracted
# - RedScaff: All linking chains between rings are deleted. The rings are directly connected.
#
# You can comment in/out the code snippets indicated by the comments
# to force each atom of the frame to be a Carbon.
#
# Usage: Frames.py <database.sdf>
# Output:
# - sd files containing all molecules belonging to one frame (1.sdf, 2.sdf etc)
# - frames.smi containing the (canonical) smiles and count of occurrence
#
from __future__ import print_function
import os,sys
from rdkit.Chem import AllChem as Chem
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all nested sub-sequences (iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def GetFrame(mol, mode='Scaff'):
    '''Return a generic molecule defining the reduced scaffold of the input mol.
mode can be 'Scaff' or 'RedScaff':
Scaff -> chop off the side chains and return the scaffold
RedScaff -> remove all linking chains and connect the rings
directly at the atoms where the linker was
'''
ring = mol.GetRingInfo()
RingAtoms = flatten(ring.AtomRings())
NonRingAtoms = [ atom.GetIdx() for atom in mol.GetAtoms() if atom.GetIdx() not in RingAtoms ]
RingNeighbors = []
Paths = []
for NonRingAtom in NonRingAtoms:
for neighbor in mol.GetAtomWithIdx(NonRingAtom).GetNeighbors():
if neighbor.GetIdx() in RingAtoms:
RingNeighbors.append(NonRingAtom)
                Paths.append([neighbor.GetIdx(),NonRingAtom]) # The ring atoms having a non-ring neighbor will be the start of a walk
break
PosConnectors = [x for x in NonRingAtoms if x not in RingNeighbors] #Only these Atoms are potential starting points of a Linker chain
#print 'PosConnectors:'
#print PosConnectors
Framework = [ x for x in RingAtoms ]
#Start a list of pathways which we will have to walk
#print 'Path atoms:'
#print Paths
Linkers = []
while len(Paths)>0:
NewPaths = []
for P in Paths:
if P == None:
print('ooh')
else:
for neighbor in mol.GetAtomWithIdx(P[-1]).GetNeighbors():
if neighbor.GetIdx() not in P:
if neighbor.GetIdx() in NonRingAtoms:
n = P[:]
n.append(neighbor.GetIdx())
NewPaths.append(n[:])
elif neighbor.GetIdx() in RingAtoms:
#print 'adding the following path to Framework:'
#print P
n = P[:]
n.append(neighbor.GetIdx())
Linkers.append(n)
Framework=Framework+P[:]
Paths = NewPaths[:]
#print 'Linkers:',Linkers
#print 'RingAtoms:',RingAtoms
#em.AddBond(3,4,Chem.BondType.SINGLE)
if mode == 'RedScaff':
Framework = list(set(Framework))
todel = []
NonRingAtoms.sort(reverse=True)
em = Chem.EditableMol(mol)
BondsToAdd = [ sorted([i[0],i[-1]]) for i in Linkers ]
mem = []
for i in BondsToAdd:
if i not in mem:
em.AddBond(i[0],i[1],Chem.BondType.SINGLE)
mem.append(i)
for i in NonRingAtoms:
todel.append(i)
for i in todel:
em.RemoveAtom(i)
m = em.GetMol()
#===================================#
# Now do the flattening of atoms and bonds!
# Any heavy atom will become a carbon and any bond will become a single bond! #
#===================================#
# for atom in m.GetAtoms(): #
# atom.SetAtomicNum(6) #
# atom.SetFormalCharge(0) #
# for bond in m.GetBonds(): #
# bond.SetBondType(Chem.BondType.SINGLE) #
# Chem.SanitizeMol(m) #
#===================================#
return m
if mode == 'Scaff':
Framework = list(set(Framework))
todel = []
NonRingAtoms.sort(reverse=True)
for i in NonRingAtoms:
if i != None:
if i not in Framework:
todel.append(i)
em = Chem.EditableMol(mol)
for i in todel:
em.RemoveAtom(i)
m = em.GetMol()
#===================================#
# Now do the flattening of atoms and bonds!
# Any heavy atom will become a carbon and any bond will become a single bond!! #
#===================================#
# for atom in m.GetAtoms(): #
# atom.SetAtomicNum(6) #
# atom.SetFormalCharge(0) #
# for bond in m.GetBonds(): #
# bond.SetBondType(Chem.BondType.SINGLE) #
# Chem.SanitizeMol(m) #
#===================================#
return m
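# A minimal usage sketch (illustrative SMILES; assumes an RDKit-sanitized mol):
#   mol = Chem.MolFromSmiles('c1ccccc1CCN2CCCC2')
#   scaffold = GetFrame(mol, mode='Scaff')    # rings plus their linker atoms
#   reduced = GetFrame(mol, mode='RedScaff')  # rings bonded directly together
#   print(Chem.MolToSmiles(scaffold), Chem.MolToSmiles(reduced))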
if __name__=='__main__':
if len(sys.argv) < 2:
print("No input file provided: Frames.py filetosprocess.ext")
sys.exit(1)
suppl = Chem.SDMolSupplier(sys.argv[1])
FrameDict = {}
for mol in suppl:
m = GetFrame(mol)
cansmiles = Chem.MolToSmiles(m, isomericSmiles=True)
if FrameDict.has_key(cansmiles):
FrameDict[cansmiles].append(mol)
else:
FrameDict[cansmiles]=[mol,]
counter=0
w=open('frames.smi','w')
for key,item in FrameDict.items():
counter+=1
d=Chem.SDWriter(str(counter)+'.sdf')
for i in item:
i.SetProp('Scaffold',key)
i.SetProp('Cluster',str(counter))
d.write(i)
print(key,len(item))
w.write(key+'\t'+str(len(item))+'\n')
    w.close()
print('number of Clusters: %d' %(counter))
| soerendip42/rdkit | Contrib/M_Kossner/Frames.py | Python | bsd-3-clause | 6,124 |
from django.shortcuts import render
def home(request):
return render(request, 'home.html', {})
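# A minimal wiring sketch (illustrative; the template's actual urls.py may differ):
#   from django.conf.urls import url
#   from . import views
#   urlpatterns = [url(r'^$', views.home, name='home')]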
| Traviskn/django_starter_template | {{cookiecutter.project_name}}/{{cookiecutter.project_name}}/views.py | Python | mit | 101 |
# Check the various features of the ShTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: FAIL: shtest-format :: external_shell/fail.txt
# CHECK: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
# CHECK: Command Output (stderr):
# CHECK: cat: does-not-exist: No such file or directory
# CHECK: --
# CHECK: PASS: shtest-format :: external_shell/pass.txt
# CHECK: FAIL: shtest-format :: fail.txt
# CHECK: UNRESOLVED: shtest-format :: no-test-line.txt
# CHECK: PASS: shtest-format :: pass.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt
# CHECK: PASS: shtest-format :: requires-present.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt
# CHECK: XFAIL: shtest-format :: xfail-feature.txt
# CHECK: XFAIL: shtest-format :: xfail-target.txt
# CHECK: XFAIL: shtest-format :: xfail.txt
# CHECK: XPASS: shtest-format :: xpass.txt
# CHECK: Testing Time
# CHECK: Unexpected Passing Tests (1)
# CHECK: shtest-format :: xpass.txt
# CHECK: Failing Tests (2)
# CHECK: shtest-format :: external_shell/fail.txt
# CHECK: shtest-format :: fail.txt
# CHECK: Expected Passes : 3
# CHECK: Expected Failures : 3
# CHECK: Unsupported Tests : 2
# CHECK: Unresolved Tests : 1
# CHECK: Unexpected Passes : 1
# CHECK: Unexpected Failures: 2
| dbrumley/recfi | llvm-3.3/utils/lit/tests/shtest-format.py | Python | mit | 1,371 |