repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
dims/neutron | neutron/common/config.py | 1 | 13000 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import sys
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log as logging
import oslo_messaging
from oslo_service import wsgi
from neutron._i18n import _, _LI
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron import policy
from neutron import version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.PortOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions. "
"Note that this can be a colon-separated list of paths. "
"For example: api_extensions_path = "
"extensions:/path/to/more/exts:/even/more/exts. "
"The __path__ of neutron.extensions is appended to "
"this, so if your extensions are in there you don't "
"need to specify them here.")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs. "
"The first 3 octets will remain unchanged. If the 4th "
"octet is not 00, it will also be used. The others "
"will be randomly generated.")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.ListOpt('default_availability_zones', default=[],
help=_("Default value of availability zone hints. The "
"availability zone aware schedulers use this when "
"the resources availability_zone_hints is empty. "
"Multiple availability zones can be specified by a "
"comma separated string. This value can be empty. "
"In this case, even if availability_zone_hints for "
"a resource is empty, availability zone is "
"considered for high availability while scheduling "
"the resource.")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers per subnet")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
deprecated_for_removal=True,
help=_("Maximum number of fixed ips per port. This option "
"is deprecated and will be removed in the N "
"release.")),
cfg.StrOpt('default_ipv4_subnet_pool', deprecated_for_removal=True,
help=_("Default IPv4 subnet pool to be used for automatic "
"subnet CIDR allocation. "
"Specifies by UUID the pool to be used in case where "
"creation of a subnet is being called without a "
"subnet pool ID. If not set then no pool "
"will be used unless passed explicitly to the subnet "
"create. If no pool is used, then a CIDR must be passed "
"to create a subnet and that subnet will not be "
"allocated from any pool; it will be considered part of "
"the tenant's private address space. This option is "
"deprecated for removal in the N release.")),
cfg.StrOpt('default_ipv6_subnet_pool', deprecated_for_removal=True,
help=_("Default IPv6 subnet pool to be used for automatic "
"subnet CIDR allocation. "
"Specifies by UUID the pool to be used in case where "
"creation of a subnet is being called without a "
"subnet pool ID. See the description for "
"default_ipv4_subnet_pool for more information. This "
"option is deprecated for removal in the N release.")),
cfg.BoolOpt('ipv6_pd_enabled', default=False,
help=_("Enables IPv6 Prefix Delegation for automatic subnet "
"CIDR allocation. "
"Set to True to enable IPv6 Prefix Delegation for "
"subnet allocation in a PD-capable environment. Users "
"making subnet creation requests for IPv6 subnets "
"without providing a CIDR or subnetpool ID will be "
"given a CIDR via the Prefix Delegation mechanism. "
"Note that enabling PD will override the behavior of "
"the default IPv6 subnetpool.")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration (in seconds). Use -1 to tell "
"dnsmasq to use infinite lease times.")),
cfg.StrOpt('dns_domain',
default='openstacklocal',
help=_('Domain to use for building the hostnames')),
cfg.StrOpt('external_dns_driver',
help=_('Driver for external DNS integration.')),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron. "
"Attention: the following parameter MUST be set to "
"False if Neutron is being used in conjunction with "
"Nova security groups.")),
cfg.StrOpt('host', default=utils.get_hostname(),
sample_default='example.domain',
help=_("Hostname to be used by the Neutron server, agents and "
"services running on this machine. All the agents and "
"services running on this machine must use the same "
"host value.")),
cfg.BoolOpt('force_gateway_on_subnet', default=True,
deprecated_for_removal=True,
help=_("Ensure that configured gateway is on subnet. "
"For IPv6, validate only if gateway is not a link "
"local address. Deprecated, to be removed during the "
"Newton release, at which point the gateway will not "
"be forced on to subnet.")),
cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
help=_("Send notification to nova when port status changes")),
cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
help=_("Send notification to nova when port data (fixed_ips/"
"floatingip) changes so nova can update its cache.")),
cfg.IntOpt('send_events_interval', default=2,
help=_('Number of seconds between sending events to nova if '
'there are any events to send.')),
cfg.BoolOpt('advertise_mtu', default=True,
help=_('If True, advertise network MTU values if core plugin '
'calculates them. MTU is advertised to running '
'instances via DHCP and RA MTU options.')),
cfg.StrOpt('ipam_driver',
help=_("Neutron IPAM (IP address management) driver to use. "
"If ipam_driver is not set (default behavior), no IPAM "
"driver is used. In order to use the reference "
"implementation of Neutron IPAM driver, "
"use 'internal'.")),
cfg.BoolOpt('vlan_transparent', default=False,
help=_('If True, then allow plugins that support it to '
'create VLAN transparent networks.')),
cfg.StrOpt('web_framework', default='legacy',
choices=('legacy', 'pecan'),
help=_("This will choose the web framework in which to run "
"the Neutron API server. 'pecan' is a new experiemental "
"rewrite of the API server."))
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
wsgi.register_opts(cfg.CONF)
# Ensure that the control exchange is set correctly
oslo_messaging.set_transport_defaults(control_exchange='neutron')
def set_db_defaults():
# Update the default QueuePool parameters. These can be tweaked by the
# conf variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(
cfg.CONF,
connection='sqlite://',
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
set_db_defaults()
NOVA_CONF_SECTION = 'nova'
ks_loading.register_auth_conf_options(cfg.CONF, NOVA_CONF_SECTION)
ks_loading.register_session_conf_options(cfg.CONF, NOVA_CONF_SECTION)
nova_opts = [
cfg.StrOpt('region_name',
help=_('Name of nova region to use. Useful if keystone manages'
' more than one region.')),
cfg.StrOpt('endpoint_type',
default='public',
choices=['public', 'admin', 'internal'],
help=_('Type of the nova endpoint to use. This endpoint will'
' be looked up in the keystone catalog and should be'
' one of public, internal or admin.')),
]
cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION)
logging.register_options(cfg.CONF)
def init(args, **kwargs):
cfg.CONF(args=args, project='neutron',
version='%%(prog)s %s' % version.version_info.release_string(),
**kwargs)
# FIXME(ihrachys): if import is put in global, circular import
# failure occurs
from neutron.common import rpc as n_rpc
n_rpc.init(cfg.CONF)
# Validate that the base_mac is of the correct format
msg = attributes._validate_regex(cfg.CONF.base_mac,
attributes.MAC_PATTERN)
if msg:
msg = _("Base MAC: %s") % msg
raise Exception(msg)
def setup_logging():
"""Sets up the logging options for a log with supplied name."""
product_name = "neutron"
logging.setup(cfg.CONF, product_name)
LOG.info(_LI("Logging enabled!"))
LOG.info(_LI("%(prog)s version %(version)s"),
{'prog': sys.argv[0],
'version': version.version_info.release_string()})
LOG.debug("command line: %s", " ".join(sys.argv))
def reset_service():
# Reset worker in case SIGHUP is called.
# Note that this is called only in case a service is running in
# daemon mode.
setup_logging()
policy.refresh()
def load_paste_app(app_name):
"""Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
"""
loader = wsgi.Loader(cfg.CONF)
app = loader.load_app(app_name)
return app
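# Usage sketch (illustrative only): a Neutron service entry point would typically
# combine the helpers above roughly as follows; the 'neutron' paste app name is
# just an assumption for the example.
#
#     from neutron.common import config
#     config.init(sys.argv[1:])   # parse CLI/config files, init RPC, validate base_mac
#     config.setup_logging()      # configure oslo.log and print the version banner
#     app = config.load_paste_app('neutron')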
| apache-2.0 | -2,916,700,827,220,835,000 | 46.619048 | 79 | 0.585846 | false |
bacemtayeb/Tierra | src/core/stream.py | 1 | 11392 | import gc
import re
import config
from copy import copy
from colors import color
from textwrap import dedent
from util import Msg, Error, debug, check_opts, eval_type
from collections import OrderedDict, namedtuple
from src.modules.services.service import Service
"""
Main data bus for interacting with the various modules. Dumps information,
initializes objects, and houses all of the objects necessary to
create/get/dump/stop the sniffers/poisoners.
"""
# main struct; ordered dictionary
HOUSE = OrderedDict()
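# Shape sketch (illustrative): HOUSE maps a module type to its running sessions,
# i.e. HOUSE['service'][service.which] -> Service instance and, for everything
# else, HOUSE[module.which][module.session_view()] -> module instance.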
class FailedCheck(Exception):
""" Used primarily for error checking and breaking safely out
of outer loops.
"""
pass
def initialize(module):
""" Initialize a module and load it into the global HOUSE
variable. MODULE should be an instance of the loaded
module.
"""
global HOUSE
debug("Received module start for: %s" % (module.__name__))
if not 'service' in HOUSE:
# services will always be 0
HOUSE['service'] = {}
tmp_mod = module()
# option management interface; i.e. if we need to
# load into another menu
if not tmp_mod.skip_opts:
response = handle_opts(tmp_mod)
else:
response = True
if response:
if hasattr(tmp_mod, 'initialize_bg'):
tmp = tmp_mod.initialize_bg()
else:
tmp = tmp_mod.initialize()
else:
return
if tmp is not None and tmp is not False:
if isinstance(tmp_mod, Service):
HOUSE['service'][tmp_mod.which] = tmp_mod
return
if not tmp_mod.which in HOUSE:
HOUSE[tmp_mod.which] = {}
HOUSE[tmp_mod.which][tmp_mod.session_view()] = tmp_mod
def display_options(options, settings):
""" Given a module's options and the column
headers, generate a table, print it, and return
the completed table.
"""
table = []
for (idx, opt) in enumerate(options.keys()):
tmp = []
tmp.append(idx + 1)
tmp.append(options[opt].display)
tmp.append(options[opt].getStr())
tmp.append(options[opt].type)
tmp.append(options[opt].required)
table.append(tmp)
if len(table) > 0:
config.pptable([settings] + table)
else:
Msg('\tModule has no options.')
print color.B_YELLOW + '0' + color.B_GREEN + ') ' + color.B_WHITE + 'Back' + color.END
return table
def handle_opts(module):
""" The user has selected a module, so we should parse out all the
options for this particular module, set the config, and when
requested, run it. This is kinda messy, but works for now.
"""
# fetch generic module options and module-specific options
options = module.config
# dump module settings
Setting = ['', 'Option', 'Value', 'Type', 'Required']
table = display_options(options, Setting)
while True:
# fetch command/option
try:
choice = raw_input('%s > ' % (color.B_WHITE + module.which + color.END))
# first check global commands
tmp = check_opts(choice)
if tmp == -1:
continue
# check module commands
if choice is "0":
return False
elif choice == "info":
if module.info is None:
Msg("Module has no information available")
continue
print '%s%s%s' % (color.GREEN,
'-' * len(module.info.split('\n')[1].strip()),
color.END),
print dedent(module.info.rstrip())
print '%s%s%s' % (color.GREEN,
'-' * len(module.info.split('\n')[1].strip()),
color.END)
elif choice == "ops":
display_options(options, Setting)
continue
elif len(choice.split(' ')) > 1:
choice = choice.split(' ')
try:
if int(choice[0]) > len(table):
continue
                    elif int(choice[0]) == 0:
return False
key = options.keys()[int(choice[0])-1]
if choice[1] == 'o' and module.config[key].opts is not None:
Msg("Options: %s" % module.config[key].opts)
continue
elif choice[1] == 'o' and module.config[key].type == 'list':
Msg('%s' % module.config[key].value)
continue
# generate a temporary zoption
tmp = copy(module.config[key])
tmp.value = ' '.join(choice[1::])
# we've got a valid number, validate the type and set it
if not tmp.validate():
Error('Wrong type assigned. Expected value of type "%s"'%
options[key].type)
else:
module.config[key] = tmp
except Exception, e:
Error('%s' % e)
continue
elif "r" in choice.lower() or "run" in choice.lower():
# verify all required options are set
for opt in options.keys():
if options[opt].required and options[opt].value is None:
Error('Option \'%s\' is required.'%opt)
raise FailedCheck
return True
except KeyboardInterrupt:
return False
except FailedCheck:
continue
except Exception, e:
Error('%s' % e)
def dump_sessions():
"""Format and print the currently running modules.
"""
global HOUSE
print color.B_GREEN + '\n\t[' + color.B_YELLOW + 'Running sessions' + \
color.B_GREEN + ']' + color.END
if 'service' in HOUSE:
# services first
tmp = HOUSE['service']
if len(tmp) > 0:
print color.B_GREEN + '\t[' + color.B_YELLOW + '0' + color.B_GREEN + \
'] ' + color.B_WHITE + 'Services' + color.END
for (cnt, service) in enumerate(tmp):
print color.B_GREEN + '\t\t[' + color.B_YELLOW + str(cnt) + color.B_GREEN + \
'] ' + color.B_WHITE + tmp[service].session_view() + color.END
if tmp[service].log_data:
print color.B_YELLOW + '\t\t\t--> ' + color.B_WHITE + 'Logging to ' + \
tmp[service].log_file.name + color.END
for (cnt, key) in enumerate(HOUSE.keys()):
        if key == 'service':
continue
if len(HOUSE[key]) > 0:
print color.B_GREEN + '\t[' + color.B_YELLOW + str(cnt) + color.B_GREEN + \
']' + color.B_WHITE + ' ' + key + color.END
for (cnt, obj) in enumerate(HOUSE[key]):
print color.B_GREEN + '\t\t[' + color.B_YELLOW + str(cnt) + color.B_GREEN + \
'] ' + color.B_WHITE + HOUSE[key][obj].session_view() + color.END
if hasattr(HOUSE[key][obj], 'log_data'):
if HOUSE[key][obj].log_data:
print color.B_YELLOW + '\t\t\t--> ' + color.B_WHITE + 'Logging to ' + \
HOUSE[key][obj].log_file.name + color.END
print '\n'
def dump_module_sessions(module):
"""Dump running sessions for a module.
@param module is the module to dump.
"""
global HOUSE
if not module in HOUSE.keys():
Error('Module \'%s\' not found.' % module)
return
else:
mod = HOUSE[module]
print color.B_YELLOW + '[' + color.B_RED + '!' + color.B_YELLOW + '] ' + \
color.B_WHITE + module
for (cnt, obj) in enumerate(mod.keys()):
print color.B_GREEN + '\t[' + color.B_YELLOW + str(cnt) + color.B_GREEN + '] ' + \
color.B_WHITE + str(obj)
def get_session_count():
""" Return a count of the number of running sessions
"""
global HOUSE
cnt = 0
if len(HOUSE.keys()) > 0:
for key in HOUSE.keys():
for entry in HOUSE[key]:
if HOUSE[key][entry].running:
cnt += 1
return cnt
def stop_session(module, number):
""" Stop a specific session; calls the respective module's
shutdown() method.
@param module is the module number
@param number is the session number
"""
global HOUSE
if module == 'all' and number == -1:
# kill all
for key in HOUSE.keys():
for entry in HOUSE[key]:
HOUSE[key][entry].shutdown()
else:
(mod, mod_inst) = get_mod_num(module, number)
if not mod is None and not mod_inst is None:
HOUSE[mod][mod_inst].shutdown()
del(HOUSE[mod][mod_inst])
            if len(HOUSE[mod].keys()) == 0:
del(HOUSE[mod])
else:
return
gc.collect()
def view_session(module, number):
"""Initializes a module's view
@param module is the module number
@param number is the session number
"""
global HOUSE
mod = get_module(module, number)
if hasattr(mod, 'view'):
mod.view()
def toggle_log(module, number, file_loc=None, toggle=False):
"""Toggle the logger of a module
@param module is the module number
@param number is the session number
@param file_loc is a string containing the file path
@param toggle is True to turn on logging or False to turn off
"""
(mod, mod_inst) = get_mod_num(module, number)
if not mod is None and not mod_inst is None and hasattr(HOUSE[mod][mod_inst], 'log'):
if toggle:
# enable
HOUSE[mod][mod_inst].log(True, file_loc)
else:
# disable
HOUSE[mod][mod_inst].log(False)
else:
Error('Module does not have a logger or doesn\'t exist.')
def get_session_input():
""" Helper for obtaining module and session numbers
"""
try:
display = color.B_GREEN + '[' + color.B_YELLOW + 'session' + color.B_GREEN + \
'] [' + color.B_YELLOW + 'number' + color.B_GREEN + ']' + \
color.B_WHITE + ' > '
tmp = raw_input(display)
(module, number) = tmp.split(' ')
if not module is None and not number is None:
return (int(module), int(number))
except Exception:
Error('Must specify [module] followed by [number]\n')
return (None, None)
def get_module(module, number):
""" Retrieve an instance of a running session
@param module is the module number
@param number is the session number
"""
(mod, mod_inst) = get_mod_num(module, number)
if not mod is None and not mod_inst is None:
return HOUSE[mod][mod_inst]
return None
def get_mod_num(module, number):
"""Fetch the module and number instances given their
indexes.
@param module is the module index
@param number is the module session index
"""
if len(HOUSE.keys()) > module:
mod = HOUSE.keys()[module]
if len(HOUSE[mod].keys()) > number:
mod_instance = HOUSE[mod].keys()[number]
return (mod, mod_instance)
return (None, None)
| gpl-3.0 | 4,008,644,137,442,229,000 | 33.313253 | 91 | 0.533971 | false |
nginxinc/kubernetes-ingress | tests/suite/grpc/helloworld_pb2.py | 1 | 3911 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: helloworld.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='helloworld.proto',
package='helloworld',
syntax='proto3',
serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3')
)
_HELLOREQUEST = _descriptor.Descriptor(
name='HelloRequest',
full_name='helloworld.HelloRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='helloworld.HelloRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=60,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='helloworld.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='helloworld.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=91,
)
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
DESCRIPTOR = _HELLOREQUEST,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
))
_sym_db.RegisterMessage(HelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
_GREETER = _descriptor.ServiceDescriptor(
name='Greeter',
full_name='helloworld.Greeter',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=93,
serialized_end=166,
methods=[
_descriptor.MethodDescriptor(
name='SayHello',
full_name='helloworld.Greeter.SayHello',
index=0,
containing_service=None,
input_type=_HELLOREQUEST,
output_type=_HELLOREPLY,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_GREETER)
DESCRIPTOR.services_by_name['Greeter'] = _GREETER
# @@protoc_insertion_point(module_scope) | apache-2.0 | -1,847,906,107,374,825,000 | 28.19403 | 369 | 0.722833 | false |
Artanicus/python-cozify | cozify/test/test_cloud_api.py | 1 | 1584 | #!/usr/bin/env python3
import os, pytest, tempfile, datetime
from cozify import cloud_api
from cozify.test import debug
from cozify.test.fixtures import *
from cozify.Error import AuthenticationError, APIError, ConnectionError
from mbtest.imposters import Imposter, Predicate, Stub, Response
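# These tests stand up a local Mountebank imposter (via mbtest) that fakes a
# single Cozify cloud endpoint, then point cloud_api at it through the base=
# keyword argument instead of the real cloud.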
@pytest.mark.mbtest
def test_cloud_api_mock_lan_ip(mock_server):
imposter = Imposter(Stub(Predicate(path="/hub/lan_ip"), Response(body='[ "127.0.0.1" ]')))
with mock_server(imposter):
assert cloud_api.lan_ip(base=imposter.url)
@pytest.mark.mbtest
def test_cloud_api_timeout(mock_server):
imposter = Imposter(
Stub(Predicate(path="/hub/lan_ip"), Response(body='[ "127.0.0.1" ]', wait=6000)))
with pytest.raises(ConnectionError) as e_info:
with mock_server(imposter):
cloud_api.lan_ip(base=imposter.url)
@pytest.mark.mbtest
def test_cloud_api_emaillogin(mock_server, tmp_cloud):
imposter = Imposter(Stub(Predicate(path="/user/emaillogin"), Response(body=tmp_cloud.token)))
with mock_server(imposter):
token = cloud_api.emaillogin(email=tmp_cloud.email, otp='42', base=imposter.url)
assert isinstance(token, str)
@pytest.mark.mbtest
def test_cloud_api_requestlogin(mock_server, tmp_cloud):
imposter = Imposter(
Stub(
Predicate(method=Predicate.Method.POST) & Predicate(path="/user/requestlogin")
& Predicate(query={"email": tmp_cloud.email}), Response(body='null')))
with mock_server(imposter):
cloud_api.requestlogin(email=tmp_cloud.email, base=imposter.url)
| mit | 2,131,560,475,806,072,800 | 35 | 97 | 0.702652 | false |
ercanezin/ce888labs | lab8/imdb.py | 1 | 2192 |
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Model
from keras.layers import Dense, Activation, Embedding, GlobalMaxPooling1D,Convolution1D, Input,LSTM,merge
from keras.datasets import imdb
max_features = 20000
maxlen = 80 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
###PREPROCESSING
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print (X_train[0])
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
###PREPROCESSING ENDS
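# Model sketch: a shared Embedding layer feeds two branches -- a Conv1D +
# GlobalMaxPooling1D branch and an LSTM branch -- whose outputs are concatenated
# and passed through a single sigmoid unit for binary sentiment classification.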
inputs = Input(shape=(maxlen,))
m = inputs
m = Embedding(max_features, 128, dropout=0.2)(m)
x = Convolution1D(nb_filter=32, filter_length=4, border_mode='valid',activation='relu', subsample_length=1)(m)
x = GlobalMaxPooling1D()(x)
y=LSTM(70)(m)
z=merge([x, y], mode='concat', concat_axis=1)
z = Dense(1)(z)
predictions = Activation("sigmoid")(z)
model = Model(input=inputs, output=predictions)
#
# model = Sequential()
# model.add(Embedding(max_features, embedding_size, input_length=maxlen))
# model.add(Dropout(0.25))
# model.add(Convolution1D(nb_filter=nb_filter,
# filter_length=filter_length,
# border_mode='valid',
# activation='relu',
# subsample_length=1))
# model.add(MaxPooling1D(pool_length=pool_length))
# model.add(LSTM(lstm_output_size))
# model.add(Dense(1))
# model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc) | gpl-3.0 | 8,493,642,314,764,031,000 | 24.206897 | 110 | 0.69115 | false |
zergov/flashcards | flashcards/sets.py | 1 | 3877 | """
flashcards.sets
~~~~~~~~~~~~~~~~~~~
Contain the StudySet object and logic related to it.
"""
from collections import OrderedDict
from flashcards import cards
from flashcards.cards import StudyCard
TITLE_KEY = 'title'
DESC_KEY = 'description'
CARDS_KEY = 'cards'
def create_from_dict(data):
"""
Construct a StudySet Object from a dictionary object.
:param data: the dictionary object
:raises KeyError: when dictionary is missing a needed field to create obj
:raises ValueError: if cards field in data is not of type list
:returns: StudySet object
"""
_assert_data_is_valid(data)
title = data[TITLE_KEY]
description = data[DESC_KEY]
study_cards = [cards.create_from_dict(card) for card in data[CARDS_KEY]]
study_set = StudySet(title, description)
for card in study_cards:
study_set.add(card)
return study_set
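# Example of the expected dictionary shape (illustrative; the exact card fields
# are whatever flashcards.cards.create_from_dict() accepts -- 'question'/'answer'
# below is only an assumption):
#
#     data = {
#         'title': 'French verbs',
#         'description': 'Common irregular verbs',
#         'cards': [{'question': 'aller', 'answer': 'to go'}],
#     }
#     study_set = create_from_dict(data)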
def _assert_data_is_valid(data):
""" Check that data received in `create_from_dict` has a valid format """
if TITLE_KEY not in data:
raise KeyError("Invalid data string. %s key is missing" % TITLE_KEY)
if DESC_KEY not in data:
raise KeyError("Invalid data string. %s key is missing" % DESC_KEY)
if CARDS_KEY not in data:
raise KeyError("Invalid data string. %s key is missing" % CARDS_KEY)
if not isinstance(data[CARDS_KEY], list):
raise ValueError("Invalid data type. %s value's should be a list"
% CARDS_KEY)
class StudySet(object):
"""
A StudySet is a container of flash cards.
"""
def __init__(self, title, description=None):
"""
Creates a Study set.
:param title: The title of the study set.
:param description: The description for this study set.
"""
self._title = title
self._description = '' if description is None else description
self._cards = []
def __iter__(self):
"""Iter through the cards of this set."""
return iter(self._cards)
def __len__(self):
"""Return the number of cards in this StudySet."""
return len(self._cards)
@property
def title(self):
"""
Get the title of this set.
:returns: The title of this Study set.
"""
return self._title
@title.setter
def title(self, value):
"""
Set the title of this set.
:param value: The new title for this set
"""
if isinstance(value, basestring):
self._title = value
else:
raise TypeError("StudySet title should be of type str")
@property
def description(self):
"""
Get the description of this set.
"""
return self._description
@description.setter
def description(self, value):
"""
Set the description of this set.
:param value: The new description for this set
"""
if isinstance(value, basestring):
self._description = value
else:
raise TypeError("StudySet description should be of type str")
def add(self, card):
"""
Add a card to the end of this set.
:param card: A subclass of flashcards.cards.StudyCard object.
"""
if isinstance(card, StudyCard):
self._cards.append(card)
else:
raise TypeError("A Set can only contain instances of "
"StudyCard objects.")
def to_dict(self):
"""
Get a dictionary object representing this StudySet.
:returns: a dictionary object representation of this StudySet.
"""
serialized_cards = [c.to_dict() for c in self]
data = ((TITLE_KEY, self.title),
(DESC_KEY, self.description),
(CARDS_KEY, serialized_cards))
return OrderedDict(data)
| mit | -994,116,634,628,581,500 | 25.923611 | 77 | 0.594532 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/resources/types/search_term_view.py | 1 | 2213 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import search_term_targeting_status
__protobuf__ = proto.module(
package='google.ads.googleads.v6.resources',
marshal='google.ads.googleads.v6',
manifest={
'SearchTermView',
},
)
class SearchTermView(proto.Message):
r"""A search term view with metrics aggregated by search term at
the ad group level.
Attributes:
resource_name (str):
Output only. The resource name of the search term view.
Search term view resource names have the form:
``customers/{customer_id}/searchTermViews/{campaign_id}~{ad_group_id}~{URL-base64_search_term}``
search_term (str):
Output only. The search term.
ad_group (str):
Output only. The ad group the search term
served in.
status (google.ads.googleads.v6.enums.types.SearchTermTargetingStatusEnum.SearchTermTargetingStatus):
Output only. Indicates whether the search
term is currently one of your targeted or
excluded keywords.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
search_term = proto.Field(
proto.STRING,
number=5,
optional=True,
)
ad_group = proto.Field(
proto.STRING,
number=6,
optional=True,
)
status = proto.Field(
proto.ENUM,
number=4,
enum=search_term_targeting_status.SearchTermTargetingStatusEnum.SearchTermTargetingStatus,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -114,439,431,116,269,020 | 29.736111 | 109 | 0.658834 | false |
lpryszcz/REDiscover | taxid2sra.py | 1 | 13105 | #!/usr/bin/env python
desc="""Fetch all entries from SRA for given taxid.
Save the biggest run for each SAMPLE (SRS) from a given date. Paired first, if any.
Note: it runs fastq-dump in the background. Make sure you have enough free cores ;)
DEPENDENCIES:
Biopython
"""
epilog="""Author:
[email protected]
Barcelona, 2/10/2012
"""
import argparse, os, re, sys, gzip
from datetime import datetime
from ftplib import FTP
from Bio import Entrez
import xml.etree.ElementTree as ET
def srr2info(srr):
"""Return info for SRR entry
- experiment id
- submission id
- project id
- biosample id
- run date
- bases
- insert size
- insert std
- reads orientation
"""
'''
for child in root[0]: print child.tag, child.attrib
EXPERIMENT {'center_name': 'BI', 'alias': '74116.WR23613.Solexa-42619.62C7UAAXX100916.P', 'accession': 'SRX026545'}
SUBMISSION {'submission_date': '2009-06-01T02:01:25Z', 'lab_name': 'Genome Sequencing', 'submission_comment': 'Produced by user cristyn on Sun May 31 22:01:25 EDT 2009', 'alias': 'BI.Streptococcus_pyogenes_Pathogenomics', 'center_name': 'BI', 'accession': 'SRA008647'}
STUDY {'center_name': 'BI', 'alias': 'Fusarium_oxysporum_Diversity_RNA_Sequencing_multi_isolate', 'accession': 'SRP002351'}
SAMPLE {'center_name': 'BI', 'alias': '74336.0', 'accession': 'SRS190364'}
RUN_SET {}
root[0][0].keys()
['center_name', 'alias', 'accession']
'''
#search NCBI
result = Entrez.read( Entrez.esearch(db="sra",term=srr ) )
if not result['IdList']:
sys.stderr.write( " Entrez Error: No results for %s\n" % srr )
return
elif len(result['IdList'])>1:
sys.stderr.write( " Entrez Warning: Multiple hits for %s: %s\n" % (srr,",".join(result['IdList'])) )
#fetch info from NCBI
xml = Entrez.efetch( db="sra",id=result['IdList'][0] ).read()
root = ET.fromstring(xml)#; print xml
#get experiment
EXPERIMENT = root[0].find("EXPERIMENT")
srx = EXPERIMENT.attrib['accession']
#get submission
s = root[0].find("SUBMISSION")
sra = s.attrib['accession']
#get accession
s = root[0].find("STUDY")
srp = s.attrib['accession']
#get accession
s = root[0].find("SAMPLE")
srs = s.attrib['accession']
s = root[0].find('RUN_SET') #it's within RUN_SET
date = s[0].attrib['run_date']
bases = s[0].attrib['total_bases']
#LIBRARY_LAYOUT - maybe try to simplify it
isize=istdv=orient = 0
DESIGN = EXPERIMENT.find("DESIGN") # [2][2][4][0].attrib#; print layout
LIBRARY_DESCRIPTOR = DESIGN.find("LIBRARY_DESCRIPTOR")
LIBRARY_LAYOUT = LIBRARY_DESCRIPTOR.find("LIBRARY_LAYOUT")
PAIRED = LIBRARY_LAYOUT.find("PAIRED")
if PAIRED is not None:
layout = PAIRED.attrib
isize = layout['NOMINAL_LENGTH'] # NOMINAL_LENGTH="476"
orient = layout['ORIENTATION'] # ORIENTATION="5\'3\'-3\'5\'
istdv = layout['NOMINAL_SDEV'] ## PAIRED NOMINAL_SDEV="149.286"
return ( srx,sra,srp,srs,date,bases,isize,istdv,orient )
def xml2data(child, taxid2srs, verbose):
""" """
#get experiment
EXPERIMENT = child.find("EXPERIMENT")
srx = EXPERIMENT.attrib['accession']
#get submission
s = child.find("SUBMISSION")
sra = s.attrib['accession']
#get accession
s = child.find("STUDY")
srp = s.attrib['accession']
#get accession
for SAMPLE in child.findall("SAMPLE"):
#if SAMPLE.attrib['accession']!=
srs = SAMPLE.attrib['accession']
#get taxid
SAMPLE_NAME = SAMPLE.find("SAMPLE_NAME")
TAXON_ID = SAMPLE_NAME.find("TAXON_ID")
taxid = int(TAXON_ID.text)
SCIENTIFIC_NAME = SAMPLE_NAME.find("SCIENTIFIC_NAME")
#malformed xml?
if SCIENTIFIC_NAME is None:
return taxid2srs
strain = SCIENTIFIC_NAME.text
strain0 = tissue = stage = ""
#get strain tag - this may cause problems with non-ENA accessions!
SAMPLE_ATTRIBUTES = SAMPLE.find("SAMPLE_ATTRIBUTES")
if SAMPLE_ATTRIBUTES is None:
continue
for SAMPLE_ATTRIBUTE in SAMPLE_ATTRIBUTES.findall("SAMPLE_ATTRIBUTE"):
#print SAMPLE_ATTRIBUTE.find("TAG").text
if SAMPLE_ATTRIBUTE.find("TAG").text == "strain":
#print SAMPLE_ATTRIBUTE.find("VALUE")
strain += " %s" % SAMPLE_ATTRIBUTE.find("VALUE").text
strain0 = SAMPLE_ATTRIBUTE.find("VALUE").text
elif SAMPLE_ATTRIBUTE.find("TAG").text == "ArrayExpress-OrganismPart":
tissue = SAMPLE_ATTRIBUTE.find("VALUE").text
elif SAMPLE_ATTRIBUTE.find("TAG").text == "ArrayExpress-StrainOrLine":
strain0 = SAMPLE_ATTRIBUTE.find("VALUE").text
elif SAMPLE_ATTRIBUTE.find("TAG").text == "ArrayExpress-DevelopmentalStage":
stage = SAMPLE_ATTRIBUTE.find("VALUE").text
if strain!="unidentified organism":
break
# get tissue
#LIBRARY_LAYOUT - maybe try to simplify it
DESIGN = EXPERIMENT.find("DESIGN") # [2][2][4][0].attrib#; print layout
LIBRARY_DESCRIPTOR = DESIGN.find("LIBRARY_DESCRIPTOR")
LIBRARY_LAYOUT = LIBRARY_DESCRIPTOR.find("LIBRARY_LAYOUT")
LIBRARY_CONSTRUCTION_PROTOCOL = LIBRARY_DESCRIPTOR.find("LIBRARY_CONSTRUCTION_PROTOCOL")# RNA-seq dUTP eukaryotic
stranded = ""
if LIBRARY_CONSTRUCTION_PROTOCOL is not None and LIBRARY_CONSTRUCTION_PROTOCOL.text is not None:
stranded = re.sub('[ \t\n\r]+', ' ', LIBRARY_CONSTRUCTION_PROTOCOL.text)
orient = ""
isize = istdv = 0
PAIRED = LIBRARY_LAYOUT.find("PAIRED")
if PAIRED is not None:
layout = PAIRED.attrib
if 'NOMINAL_LENGTH' in layout: isize = float(layout['NOMINAL_LENGTH']) # NOMINAL_LENGTH="476"
if 'NOMINAL_SDEV' in layout: istdv = float(layout['NOMINAL_SDEV']) ##PAIRED NOMINAL_SDEV="149.286"
if 'ORIENTATION' in layout: orient = layout['ORIENTATION'] #ORIENTATION="5\'3\'-3\'5\'
#run data
runs = []
RUN_SET = child.find('RUN_SET') #it's within RUN_SET
for RUN in RUN_SET.findall("RUN"):
srr = RUN.attrib['accession']
date = assembly = ""
bases = size = 0
if 'size' in RUN.attrib: size = RUN.attrib['size']
if 'run_date' in RUN.attrib: date = RUN.attrib['run_date']
if 'total_bases' in RUN.attrib: bases = int(RUN.attrib['total_bases'])
if "assembly" in RUN.attrib: assembly = RUN.attrib["assembly"]
runs.append((srr, assembly, size, bases, date))
#store data
childdata = (strain, strain0, tissue, stage, taxid, srx, srp, isize, istdv, orient, stranded, runs)
if verbose:
sys.stderr.write( " %s: %s: %s\n" % (taxid, srs, str(childdata)))
if not taxid in taxid2srs:
taxid2srs[taxid] = {}
if not srs in taxid2srs[taxid]:
taxid2srs[taxid][srs] = []
taxid2srs[taxid][srs].append(childdata)
return taxid2srs
def taxid2runs(outfn, taxid, verbose, db="sra", retmode="xml", retmax=10**6):
"""Return info from SRA for given taxid. """
taxid2srs = {}
#search NCBI
term = 'txid%s[organism] AND sra_public[filter] AND "biomol rna"[Properties]' % taxid
if verbose:
sys.stderr.write("Query: %s\n" % term)
result = Entrez.read(Entrez.esearch(db=db, term=term, retmax=retmax))#; print result
ids = result['IdList']
if not ids:
sys.stderr.write(" Entrez Error: No results for %s\n" % taxid)
return
if verbose:
sys.stderr.write("Downloading %s entries from NCBI %s database...\n" % (len(ids), db))
#post NCBI query
for id in ids:
xmlfn = os.path.join(".xml", "%s.xml.gz"%id)
if os.path.isfile(xmlfn):
xml = "".join(l for l in gzip.open(xmlfn))
else:
xml = Entrez.efetch(db=db, retmode=retmode, id=id).read()#; print xml
with gzip.open(xmlfn, "w") as out:
out.write(xml)
root = ET.fromstring(xml)
child = root[0]
taxid2srs = xml2data(child, taxid2srs, verbose)
#print output
out = open(outfn, "w")
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
header = "#Strain\tStrain0\tTissue\tStage\tTaxid\tSample\tExperiment\tProject\tRun\tInsert size\tOrientation\tStranded\tAssembly\tSize\tBases\tDate\n"
out.write(header)
info = "%s\t"*15+"%s\n"
sys.stderr.write("Saving SRA info to: %s\n" % outfn)
for taxid in taxid2srs:
for srs in taxid2srs[taxid]:
for strain, strain0, tissue, stage, taxid, srx, srp, isize, istdv, orient, stranded, runs in taxid2srs[taxid][srs]:
for srr, assembly, size, bases, date in runs:
line = info%(strain, strain0, tissue, stage, taxid, srs, srx, srp, srr, isize, orient, stranded, assembly, size, bases, date)
out.write(line.encode('ascii', 'xmlcharrefreplace'))
out.close()
return taxid2srs
def get_runs(taxid2srs, ftpdomain, orientth, maxisize, paired, minbases, verbose):
"""Select the best run for each uniq taxid-srs-date combination
"""
if verbose:
sys.stderr.write( "Fetching best run for each uniq taxid-srs-date combination...\n" )
#select the best run for each uniq taxid-srs-date combination
for taxid in taxid2srs:
for srs in taxid2srs[taxid]:
date2runs={}
            for (strain, strain0, tissue, stage, taxid, srx, srp, isize, istdv,
                 orient, stranded, runs) in taxid2srs[taxid][srs]:
#check if paired
if paired:
if not isize:
continue
#skip if wrong orientation
if orientth and orientth!=orient:
continue
#skip big insert size or not paired
if maxisize:
if isize>maxisize:
continue
#add runs passed filtering
                for srr, assembly, size, bases, date in runs:
#skip if too small yield
if bases < minbases*10**6:
continue
if date not in date2runs:
date2runs[date]=[]
date2runs[date].append( (srr,srx,srp,isize,bases) )
#process best run for each uniq taxid-srs-date combination
for date in date2runs:
#
fltruns = filter( lambda x: x[3]!=0, date2runs[date] )
if not fltruns:
fltruns = date2runs[date]
#sort by size
bestrun = sorted( fltruns,key=lambda x: x[-1],reverse=True )[0]
#print bestrun,date2runs[date]
srr,srx,srp,isize,bases = bestrun
# fetch
cmd = "fastq-dump --gzip --split-3 -O %s %s" % (outdir, srr)
def main():
usage = "%(prog)s -v"
parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
parser.add_argument("-v", dest="verbose", default=False, action="store_true", help="verbose")
parser.add_argument('--version', action='version', version='1.1')
parser.add_argument("-d", "--download", default=False, action="store_true",
help="download SRA files")
parser.add_argument("-t", "--taxid", type=int, required=True,
help="taxid of interest " )
parser.add_argument("-f", dest="ftp", default="ftp-trace.ncbi.nih.gov",
help="ftp server address [%(default)s]" )
parser.add_argument("-e", "--email", default="[email protected]", type=str,
help="email address [%(default)s]" )
parser.add_argument("-o", dest="orient", default="5'3'-3'5'",
help="orientation [%(default)s]" )
parser.add_argument("-m", dest="maxisize", default=1000, type=int,
help="max allowed insert [%(default)s]" )
parser.add_argument("-b", dest="minbases", default=600, type=int,
help="min Mbases in run [%(default)s Mbases -> 10x for 60Mb genome]" )
parser.add_argument("-p", "--paired", default=False, action="store_true",
help="fetch only paired runs" )
o = parser.parse_args()
if o.verbose:
sys.stderr.write( "Options: %s\n" % str(o) )
Entrez.email = o.email
if not os.path.isdir(".xml"):
os.makedirs(".xml")
#get all runs for taxid
outfn = "sra.tsv"
taxid2srs = taxid2runs(outfn, o.taxid, o.verbose); return
if o.download:
#fetch best srr
get_runs( taxid2srs,o.ftp,o.orient,o.maxisize,o.paired,o.minbases,o.verbose )
if __name__=='__main__':
t0 = datetime.now()
main()
dt = datetime.now()-t0
sys.stderr.write( "#Time elapsed: %s\n" % dt ) | gpl-2.0 | 5,824,388,399,453,832,000 | 42.111842 | 268 | 0.583441 | false |
ZhangXinNan/tensorflow | tensorflow/python/kernel_tests/partitioned_variables_test.py | 1 | 25955 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for partitioned_variables.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class PartitionerCreatorsTest(test.TestCase):
def testFixedSizePartitioner(self):
with self.test_session():
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
def testFixedSizePartitionerInt64(self):
with self.test_session():
partitioner = partitioned_variables.fixed_size_partitioner(4, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable("v0", dtype=dtypes.int64, shape=[20])
v0_list = v0._get_variable_list()
self.assertEqual(len(v0_list), 4)
def testResourceFixedSizePartitioner(self):
with self.test_session():
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope(
"root", partitioner=partitioner, use_resource=True):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
def _testVariableAxisSizePartitioner(self,
name,
axis,
max_shard_bytes,
expected_axis_shards,
expected_partitions,
max_shards=None):
partitioner = partitioned_variables.variable_axis_size_partitioner(
axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
name, dtype=dtypes.float32, shape=(4, 8, 16, 32))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
self.assertAllEqual(v0_part, expected_partitions)
def testVariableAxisSizePartitioner(self):
with self.test_session():
# Create a partitioned variable of shape (4, 8, 16, 32) type float32
# Bytes per slice along the given axes:
# 8 * 16 * 32 * sizeof(float32) = 16384 / slice on axis 0
# 4 * 16 * 32 * sizeof(float32) = 8192 / slice on axis 1
# 4 * 8 * 32 * sizeof(float32) = 4096 / slice on axis 2
# 4 * 8 * 16 * sizeof(float32) = 2048 / slice on axis 3
# Now partition it in different ways...
# No need to slice: bytes_per_slice * dim0 = 65536 < max_shard_bytes
self._testVariableAxisSizePartitioner(
"v0",
axis=0,
max_shard_bytes=131072,
expected_axis_shards=1,
expected_partitions=(1, 1, 1, 1))
# Slice exactly once: bytes_per_slice * dim1 = 65536 = max_shard_bytes
self._testVariableAxisSizePartitioner(
"v1",
axis=1,
max_shard_bytes=65536,
expected_axis_shards=1,
expected_partitions=(1, 1, 1, 1))
# Slice into 2 parts:
# bytes_per_slice = 4096
# slices_per_shard = 32768 / 4096 = 8
# axis_shards = 16 / 8 = 2
self._testVariableAxisSizePartitioner(
"v2",
axis=2,
max_shard_bytes=32768,
expected_axis_shards=2,
expected_partitions=(1, 1, 2, 1))
# This partitioner makes sure we maximize the number of shards along
# axis 3. Slice it into 32 parts:
# bytes_per_slice = 2048
# slices_per_shard = 2048 / 2048 = 1
# axis_shards = 32 / 1 = 32
self._testVariableAxisSizePartitioner(
"v3a",
axis=3,
max_shard_bytes=2048,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32))
# This partitioner makes sure we do not go past the bound of allowable
# number of shards along axis 3.
# Slice into 32 parts:
# bytes_per_slice = 2048
# slices_per_shard = max(1, 1024 / 2048) = 1
# axis_shards = 32 / 1 = 32
# Slice into max of 32 parts because: max_shard_bytes < bytes_per_slice
self._testVariableAxisSizePartitioner(
"v3b",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32))
# Specify max_shards so that it won't affect sharding.
self._testVariableAxisSizePartitioner(
"v3c",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32),
max_shards=33)
# Specify max_shards so that it will affect sharding.
self._testVariableAxisSizePartitioner(
"v3d",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=2,
expected_partitions=(1, 1, 1, 2),
max_shards=2)
# Use the partitioner with strings
partitioner_axis3_str = partitioned_variables.variable_axis_size_partitioner( # pylint: disable=line-too-long
axis=3,
max_shard_bytes=32768,
bytes_per_string_element=8)
with variable_scope.variable_scope(
"root", partitioner=partitioner_axis3_str):
v3str = variable_scope.get_variable(
"v3str",
initializer=np.array([""] * 4 * 8 * 16 * 32).reshape(4, 8, 16, 32),
dtype=dtypes.string,
shape=(4, 8, 16, 32))
v3str_list = v3str._get_variable_list()
v3str_part = v3str._get_partitions()
# Now the estimated bytes_per_slice = 4*8*16*bytes_per_string_element
# which is equal to 4096. Setting a max_shard_bytes of 32768
# and we should get a split of 4.
# Slice into 4 parts:
# bytes_per_slice = 4096
# slices_per_shard = 32768 / 4096 = 8
# axis_shards = 32 / 8 = 4
self.assertEqual(len(v3str_list), 4)
self.assertAllEqual(v3str_part, (1, 1, 1, 4))
def _testMinMaxVariablePartitioner(self, max_partitions, axis, min_slice_size,
var_name, var_shape, expected_axis_shards,
expected_partitions):
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=max_partitions, axis=axis, min_slice_size=min_slice_size)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
var_name, dtype=dtypes.float32, shape=var_shape)
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
self.assertAllEqual(v0_part, expected_partitions)
def testMinMaxVariablePartitioner(self):
with self.test_session():
# Partitioning a variable of shape=[2048] with a minimum of 2K per slice.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=2 << 10,
var_name="v0_0",
var_shape=[2048],
expected_axis_shards=4,
expected_partitions=[4])
# Partitioning a variable of shape=[2048, 1024] with a minimum of 256K per
# slice.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v0",
var_shape=[2048, 1024],
expected_axis_shards=32,
expected_partitions=[32, 1])
# max_partitions restricts partitioning of the variable.
self._testMinMaxVariablePartitioner(
max_partitions=16,
axis=0,
min_slice_size=256 << 10,
var_name="v1_max",
var_shape=[2048, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1])
self._testMinMaxVariablePartitioner(
max_partitions=1,
axis=0,
min_slice_size=256 << 10,
var_name="v2_max",
var_shape=[2048, 1024],
expected_axis_shards=1,
expected_partitions=[1, 1])
# Reducing/Increasing min_slice_size proportionately increases/reduces the
# number of partitions.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=128 << 10,
var_name="v3_slice",
var_shape=[2048, 1024],
expected_axis_shards=64,
expected_partitions=[64, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=512 << 10,
var_name="v4_slice",
var_shape=[2048, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1])
# Partitioning the variable along a different axis.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=1,
min_slice_size=256 << 10,
var_name="v5_axis",
var_shape=[64, 1024, 1, 3],
expected_axis_shards=3,
expected_partitions=[1, 3, 1, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=3,
min_slice_size=256 << 10,
var_name="v6_axis",
var_shape=[64, 1024, 1, 3],
expected_axis_shards=3,
expected_partitions=[1, 1, 1, 3])
# Can not partition the variable more than what its shape allows.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v7_shape",
var_shape=[16, 128, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v8_shape",
var_shape=[4, 512, 1024],
expected_axis_shards=4,
expected_partitions=[4, 1, 1])
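# _IotaInitializer fills a tensor so every element encodes its own position: the
# innermost axis counts 0..n-1 and each step along an outer axis multiplies the
# values by a further power of 10, which makes it easy to verify that partitioned
# slices end up in the right place.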
def _IotaInitializer(shape, dtype=dtypes.float32, partition_info=None):
assert dtype == dtypes.float32
if len(shape) == 1:
return range(shape[0])
else:
val = _IotaInitializer(shape[1:], dtype)
return [[(10**i) * v for v in val] for i in range(shape[0])]
class PartitionedVariablesTestCase(test.TestCase):
def _TestSaveSpec(self, slices, expected_specs):
self.assertEqual(len(expected_specs), len(slices))
for i in xrange(len(expected_specs)):
self.assertEquals(expected_specs[i], slices[i]._save_slice_info.spec)
def testVecConstantInit(self):
with self.test_session():
rnd_par = constant_op.constant([1, 2, 3, 4])
vs = partitioned_variables.create_partitioned_variables([4], [4], rnd_par)
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
rnd = rnd_par.eval()
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 4, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
def testConstantInit(self):
with self.test_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
vs = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 1).eval()
rnd = rnd_par.eval()
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 2, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"])
def _testNameHelper(self, use_resource=False):
with self.test_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope("hi", use_resource=use_resource):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hi/PartitionedVariable", var1_name)
self.assertEqual("hi/PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test same variable.
with self.test_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope(
"hola", use_resource=use_resource) as vs:
vs1 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
with variable_scope.variable_scope(
vs, reuse=True, use_resource=use_resource):
vs2 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hola/PartitionedVariable", var1_name)
self.assertEqual("hola/PartitionedVariable", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test name_scope
with self.test_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with ops.name_scope("ola"):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
# Currently, the name scope 'ola' has no effect.
self.assertEqual("PartitionedVariable", var1_name)
self.assertEqual("PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
def testName(self):
self._testNameHelper(use_resource=False)
def testResourceName(self):
self._testNameHelper(use_resource=True)
def testRandomInitValue(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([200, 40]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 10], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 1).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.float32] * 10, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, [
"200 40 0,200:0,4", "200 40 0,200:4,4", "200 40 0,200:8,4",
"200 40 0,200:12,4", "200 40 0,200:16,4", "200 40 0,200:20,4",
"200 40 0,200:24,4", "200 40 0,200:28,4", "200 40 0,200:32,4",
"200 40 0,200:36,4"
])
def testRandomInitUnevenPartitions(self):
with self.test_session():
rnd = variables.Variable(
random_ops.random_uniform([20, 43], dtype=dtypes.float64))
var_lists = [
partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, i], rnd.initialized_value())
for i in xrange(1, 10)
]
variables.global_variables_initializer().run()
rnd_val = rnd.eval()
      # Only check the slice save specs for the first 5 partitionings.
save_specs = [
# One slice
["20 43 0,20:0,43"],
# Two slices
["20 43 0,20:0,22", "20 43 0,20:22,21"],
# Three slices
["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"],
# Four slices
[
"20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11",
"20 43 0,20:33,10"
],
# Five slices
[
"20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9",
"20 43 0,20:27,8", "20 43 0,20:35,8"
]
]
for i, vs in enumerate(var_lists):
var_val = array_ops.concat(vs, 1).eval()
self.assertAllClose(rnd_val, var_val)
self.assertEqual([dtypes.float64] * len(vs),
[v.dtype.base_dtype for v in vs])
if i < len(save_specs):
self._TestSaveSpec(vs, save_specs[i])
def testDegenerate(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
def testSliceSizeOne(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [10, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, [
"10 43 0,1:0,43", "10 43 1,1:0,43", "10 43 2,1:0,43",
"10 43 3,1:0,43", "10 43 4,1:0,43", "10 43 5,1:0,43",
"10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43", "10 43 9,1:0,43"
])
def testIotaInitializer(self):
self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4]))
self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]],
_IotaInitializer([4, 2]))
with self.test_session():
vs = partitioned_variables.create_partitioned_variables([13, 5], [3, 1],
_IotaInitializer)
variables.global_variables_initializer().run()
slice0 = _IotaInitializer([5, 5])
slice1 = _IotaInitializer([4, 5])
slice2 = _IotaInitializer([4, 5])
val = array_ops.concat(vs, 0).eval()
self.assertAllClose(slice0 + slice1 + slice2, val)
self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
def testRandomInitializer(self):
    # Sanity check that the slices use different seeds when using a random
# initializer function.
with self.test_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer())
variables.global_variables_initializer().run()
val0, val1 = var0.eval().flatten(), var1.eval().flatten()
self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
# Negative test that proves that slices have the same values if
# the random initializer uses a seed.
with self.test_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer(seed=201))
variables.global_variables_initializer().run()
val0, val1 = var0.eval().flatten(), var1.eval().flatten()
self.assertAllClose(val0, val1)
def testSomeErrors(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10], [1, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 20], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 2, 3], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [11, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [20, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 50], rnd.initialized_value())
def testConcat(self):
with self.test_session() as session:
var_x = variable_scope.get_variable(
"x",
initializer=constant_op.constant([1., 2.]),
partitioner=partitioned_variables.variable_axis_size_partitioner(4))
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
ops_before_concat = session.graph.get_operations()
value = var_x._concat() # pylint: disable=protected-access
concat_ops = [
op for op in session.graph.get_operations()
if op not in ops_before_concat
]
concat_control_inputs = [
ci for op in concat_ops for ci in op.control_inputs
]
self.assertTrue(
c.op in concat_control_inputs,
"var_x._concat() should get control dependencies from its scope.")
variables.global_variables_initializer().run()
self.assertAllClose(value.eval(), var_x.as_tensor().eval())
def testVariableCreationInALoop(self):
"""Tests the variable created inside a loop can be used outside the loop."""
with self.test_session():
with variable_scope.variable_scope("ascope") as scope:
def Body(i, _):
var_x = variable_scope.get_variable(
"x",
shape=[2],
initializer=init_ops.ones_initializer(),
partitioner=partitioned_variables.variable_axis_size_partitioner(
4))
return (i + 1, var_x.as_tensor())
cond = lambda i, _: i < 2
_, x = control_flow_ops.while_loop(
cond, Body, (0, constant_op.constant([7, 8], dtypes.float32)))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 1.0], x.eval())
scope.reuse_variables()
var_x = variable_scope.get_variable(
"x",
shape=[2],
initializer=init_ops.ones_initializer(),
partitioner=partitioned_variables.variable_axis_size_partitioner(4))
self.assertAllClose([1.0, 1.0], var_x.as_tensor().eval())
def testReadInWhileLoop(self):
"""Tests the value is current (not cached) when read within a loop."""
with self.test_session():
var_x = variable_scope.get_variable(
"x",
shape=[2],
initializer=init_ops.ones_initializer(),
partitioner=partitioned_variables.variable_axis_size_partitioner(4))
def Body(i, _):
# Use a SGD step to update the variable's value.
loss = math_ops.reduce_sum(var_x)
optimizer = gradient_descent.GradientDescentOptimizer(1.0)
minimize = optimizer.minimize(loss * 0.7)
with ops.control_dependencies([minimize]):
return (i + 1, var_x.as_tensor())
cond = lambda i, _: i < 2
_, x = control_flow_ops.while_loop(
cond, Body, (0, constant_op.constant([7, 8], dtypes.float32)))
variables.global_variables_initializer().run()
self.assertAllClose([-0.4, -0.4], x.eval())
if __name__ == "__main__":
test.main()
| apache-2.0 | 5,213,598,942,687,017,000 | 40.395534 | 116 | 0.599384 | false |
eeshangarg/zulip | zerver/views/realm_icon.py | 1 | 2428 | from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from zerver.decorator import require_realm_admin
from zerver.lib.actions import do_change_icon_source
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.response import json_error, json_success
from zerver.lib.upload import upload_icon_image
from zerver.lib.url_encoding import add_query_arg_to_redirect_url
from zerver.models import UserProfile
@require_realm_admin
def upload_icon(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
if len(request.FILES) != 1:
return json_error(_("You must upload exactly one icon."))
icon_file = list(request.FILES.values())[0]
if (settings.MAX_ICON_FILE_SIZE_MIB * 1024 * 1024) < icon_file.size:
return json_error(
_("Uploaded file is larger than the allowed limit of {} MiB").format(
settings.MAX_ICON_FILE_SIZE_MIB,
)
)
upload_icon_image(icon_file, user_profile)
do_change_icon_source(
user_profile.realm, user_profile.realm.ICON_UPLOADED, acting_user=user_profile
)
icon_url = realm_icon_url(user_profile.realm)
json_result = dict(
icon_url=icon_url,
)
return json_success(json_result)
@require_realm_admin
def delete_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
# We don't actually delete the icon because it might still
# be needed if the URL was cached and it is rewritten
# in any case after next update.
do_change_icon_source(
user_profile.realm, user_profile.realm.ICON_FROM_GRAVATAR, acting_user=user_profile
)
gravatar_url = realm_icon_url(user_profile.realm)
json_result = dict(
icon_url=gravatar_url,
)
return json_success(json_result)
def get_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
url = realm_icon_url(user_profile.realm)
# We can rely on the URL already having query parameters. Because
# our templates depend on being able to use the ampersand to
# add query parameters to our url, get_icon_url does '?version=version_number'
# hacks to prevent us from having to jump through decode/encode hoops.
url = add_query_arg_to_redirect_url(url, request.META["QUERY_STRING"])
return redirect(url)
| apache-2.0 | -8,473,574,400,917,262,000 | 37.539683 | 91 | 0.714168 | false |
jashort/SmartFileSorter | tests/test_integration_tests.py | 1 | 1059 | import unittest
import smartfilesorter
import os
import tempfile
import shutil
class TestIntegrationTests(unittest.TestCase):
"""
Broader test cases
"""
def setUp(self):
self.source_dir = tempfile.mkdtemp()
self.dest_dir = os.path.join(self.source_dir, 'dest/')
os.mkdir(self.dest_dir)
self.test_filename = "test.txt"
self.source_file = os.path.join(self.source_dir, self.test_filename)
self.dest_file = os.path.join(self.dest_dir, self.test_filename)
with open(self.source_file, 'w') as output:
output.write("This is a test file.")
self.s = smartfilesorter.SmartFileSorter()
def tearDown(self):
shutil.rmtree(self.source_dir)
def test_file_matches_multiple_rulesets(self):
test_path = os.path.dirname(__file__)
test_file = os.path.join(test_path, 'match_multiple_rulesets.yml')
self.s.args = self.s.parse_arguments([test_file, self.source_dir])
self.s.create_logger(self.s.args)
self.s.run(self.s.args)
| bsd-3-clause | -8,757,449,511,512,382,000 | 30.147059 | 76 | 0.645892 | false |
huiyiqun/check_mk | cmk/regex.py | 1 | 2740 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2016 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
"""This module wraps some regex handling functions used by Check_MK"""
import re
from .exceptions import MKGeneralException
# TODO: Clean this up one day by using the way recommended by gettext.
# (See https://docs.python.org/2/library/gettext.html). For this we
# need the path to the locale files here.
try:
_
except NameError:
_ = lambda x: x # Fake i18n when not available
g_compiled_regexes = {}
def regex(pattern):
"""Compile regex or look it up in already compiled regexes.
(compiling is a CPU consuming process. We cache compiled regexes)."""
try:
return g_compiled_regexes[pattern]
except KeyError:
pass
try:
reg = re.compile(pattern)
except Exception, e:
raise MKGeneralException(_("Invalid regular expression '%s': %s") % (pattern, e))
g_compiled_regexes[pattern] = reg
return reg
# Checks if a string contains characters that make it neccessary
# to use regular expression logic to handle it correctly
def is_regex(pattern):
for c in pattern:
if c in '.?*+^$|[](){}\\':
return True
return False
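# Illustrative usage (added for clarity, not part of the original module); the
# pattern below is made up:
#
#   host_regex = regex(r"^srv[0-9]+$")   # compiled once, then cached
#   host_regex = regex(r"^srv[0-9]+$")   # second call returns the cached object
#   if is_regex("srv[0-9]+"):            # True: contains regex metacharacters
#       pass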
def escape_regex_chars(match):
r = ""
for c in match:
if c in r"[]\().?{}|*^$+":
r += "\\"
r += c
return r
| gpl-2.0 | -2,221,331,912,281,145,600 | 36.027027 | 89 | 0.531752 | false |
SublimeHaskell/SublimeHaskell | hsdev/backend.py | 1 | 36770 | """
The `hsdev` backend.
"""
from functools import reduce
import io
import json
import os
import os.path
import pprint
import re
import subprocess
import threading
import sublime
import SublimeHaskell.hsdev.callback as HsCallback
import SublimeHaskell.hsdev.client as HsDevClient
import SublimeHaskell.hsdev.result_parse as ResultParse
import SublimeHaskell.internals.backend as Backend
import SublimeHaskell.internals.logging as Logging
import SublimeHaskell.internals.output_collector as OutputCollector
import SublimeHaskell.internals.proc_helper as ProcHelper
import SublimeHaskell.internals.settings as Settings
import SublimeHaskell.internals.utils as Utils
import SublimeHaskell.sublime_haskell_common as Common
def result_identity(resp):
'''Identity function for results
'''
return resp
class HsDevBackend(Backend.HaskellBackend):
"""This class encapsulates all of the functions that interact with the `hsdev` backend.
"""
HSDEV_DEFAULT_PORT = 4567
HSDEV_DEFAULT_HOST = 'localhost'
HSDEV_NOT_FOUND = [0, 0, 0, 0]
HSDEV_MIN_VER = [0, 3, 3, 0] # minimum hsdev version
HSDEV_MAX_VER = [0, 3, 4, 0] # maximum hsdev version
HSDEV_CALL_TIMEOUT = 300.0 # second timeout for synchronous requests (5 minutes should be enough, no?)
def __init__(self, backend_mgr, local=True, port=HSDEV_DEFAULT_PORT, host=HSDEV_DEFAULT_HOST, **kwargs):
super().__init__(backend_mgr)
Logging.log('{0}.__init__({1}, {2})'.format(type(self).__name__, host, port), Logging.LOG_INFO)
# Sanity checking:
exec_with = kwargs.get('exec-with')
install_dir = kwargs.get('install-dir')
if bool(exec_with) ^ bool(install_dir):
if install_dir is None:
sublime.error_message('\n'.join(['\'exec_with\' requires an \'install_dir\'.',
'',
'Please check your \'backends\' configuration and retry.']))
raise RuntimeError('\'exec_with\' requires an \'install_dir\'.')
else:
sublime.error_message('\n'.join(['\'install_dir\' requires an \'exec_with\'.',
'',
'Please check your \'backends\' configuration and retry.']))
raise RuntimeError('\'install_dir\' requires an \'exec_with\'.')
elif exec_with and exec_with not in ['stack', 'cabal', 'cabal-new-build']:
sublime.error_message('\n'.join(['Invalid backend \'exec_with\': {0}'.format(exec_with),
'',
'Valid values are "cabal", "cabal-new-build" or "stack".',
'Please check your \'backends\' configuration and retry.']))
raise RuntimeError('Invalid backend \'exec_with\': {0}'.format(exec_with))
# Local hsdev server process and params
self.is_local_hsdev = local
self.hsdev_process = None
self.cache = os.path.join(Common.sublime_haskell_cache_path(), 'hsdev', 'hsdev.db')
self.log_file = os.path.join(Common.sublime_haskell_cache_path(), 'hsdev', 'hsdev.log')
self.exec_with = exec_with
self.install_dir = Utils.normalize_path(install_dir) if install_dir is not None else None
# Keep track of the hsdev version early. Needed to patch command line arguments later.
self.version = HsDevBackend.hsdev_version(self.exec_with, self.install_dir)
self.drain_stdout = None
self.drain_stderr = None
# Connection params
self.port = port
self.hostname = host
if self.is_local_hsdev:
self.hostname = self.HSDEV_DEFAULT_HOST
self.client = None
self.serial_lock = threading.RLock()
self.request_serial = 1
@staticmethod
def backend_name():
return 'hsdev'
@staticmethod
def is_available(**kwargs):
# Yes, this is slightly redundant because eventually __init__ does the same thing for a class
# instance.
exec_with = kwargs.get('exec-with')
install_dir = kwargs.get('install-dir')
local = kwargs.get('local', False)
exec_install_set = not bool(exec_with) ^ bool(install_dir)
backend_name = kwargs.get('backend_name', 'not specified.')
if exec_install_set or local:
if not exec_install_set:
# Either exec-with or install-dir isn't set, so the corresponding configuration target is unavailable.
return False
hsdev_ver = HsDevBackend.hsdev_version(exec_with, install_dir)
str_version = '.'.join([str(v) for v in hsdev_ver])
Logging.log('hsdev version: {0}'.format(str_version), Logging.LOG_INFO)
retval = hsdev_ver >= HsDevBackend.HSDEV_MIN_VER and hsdev_ver < HsDevBackend.HSDEV_MAX_VER
if not retval:
if retval != HsDevBackend.HSDEV_NOT_FOUND:
min_version = '.'.join([str(v) for v in HsDevBackend.HSDEV_MIN_VER])
max_version = '.'.join([str(v) for v in HsDevBackend.HSDEV_MAX_VER])
msg = '\n'.join(['Backend configuration: "{0}"'.format(backend_name),
'',
'Incompatible hsdev, detected version ' + str_version,
'Version should be \u2265 ' + min_version + ' and < ' + max_version])
else:
msg = '\n'.join(['Backend configuration: "{0}"'.format(backend_name),
'',
'Tried executing hsdev to get a version number, not successful.',
'Is hsdev installed (or built, if using stack or cabal exec wrappers)?'])
sublime.message_dialog(msg)
return retval
# Assume that a remote backend is actually available. Ultimately, we might not connect to it, but
# it is available to us as a backend.
return True
def start_backend(self):
retval = True
if self.is_local_hsdev:
Logging.log('Starting local \'hsdev\' server', Logging.LOG_INFO)
log_level = Settings.PLUGIN.hsdev_log_level
cmd = self.concat_args([(True, ["hsdev"]),
(True, ["run"]),
(self.port, ["--port", str(self.port)]),
(self.cache, ["--db", self.cache]),
(self.log_file, ["--log", self.log_file]),
(True, ["--log-level", log_level]),
(True, ["--no-color"])])
hsdev_proc = ProcHelper.exec_with_wrapper(self.exec_with, self.install_dir, cmd)
if hsdev_proc.process is not None:
# Use TextIOWrapper here because it combines decoding with newline handling,
# which means less to maintain.
hsdev_proc.process.stdout = io.TextIOWrapper(hsdev_proc.process.stdout, 'utf-8')
hsdev_proc.process.stderr = io.TextIOWrapper(hsdev_proc.process.stderr, 'utf-8')
                # Read and wait for hsdev's startup message. 15 seconds should be enough time for the message to appear.
# Otherwise, kill the thread because we don't want to get stuck waiting forever.
startup_reader = HsDevStartupReader(hsdev_proc.process.stdout)
startup_reader.start()
startup_reader.wait_startup(15.0)
if startup_reader.successful():
port = startup_reader.port()
if port != self.port:
Logging.log('hsdev: server port changed, was {0}, now {1}'.format(self.port, port), Logging.LOG_WARNING)
self.port = port
self.drain_stdout = OutputCollector.DescriptorDrain('hsdev stdout', hsdev_proc.process.stdout)
self.drain_stderr = OutputCollector.DescriptorDrain('hsdev stderr', hsdev_proc.process.stderr)
self.drain_stdout.start()
self.drain_stderr.start()
self.hsdev_process = hsdev_proc
Logging.log('Local \'hsdev\' server started successfully.', Logging.LOG_INFO)
else:
# This is a bit of a "Hail Mary!" because readline() could just hang forever. Just to make sure,
# kill the process too!
startup_reader.stop()
hsdev_proc.process.kill()
if hsdev_proc.process_err is not None:
Logging.log('Possible reason for timeout: {0}'.format(hsdev_proc.process_err))
self.hsdev_process = None
retval = False
sublime.error_message('Timed out waiting for \'hsdev\' to start up.')
else:
errmsg = 'Could not start local \'hsdev\' server because:\n\n' + hsdev_proc.process_err
sublime.error_message(errmsg)
self.hsdev_process = None
retval = False
return retval
def connect_backend(self):
Logging.log('Connecting to \'hsdev\' server at {0}:{1}'.format(self.hostname, self.port), Logging.LOG_INFO)
retval = True
self.client = HsDevClient.HsDevClient(self.backend_mgr)
if self.client.connect(self.hostname, self.port):
# For a local hsdev server that we started, send the link command so that it exits when we exit.
if self.is_local_hsdev:
self.link()
else:
            Logging.log('Connection to \'hsdev\' server unsuccessful; see tracebacks to diagnose.', Logging.LOG_ERROR)
retval = False
return retval
def disconnect_backend(self):
self.exit()
self.client.close()
def stop_backend(self):
if self.is_local_hsdev:
try:
self.hsdev_process.process.wait(90.0)
except subprocess.TimeoutExpired:
sublime.message_dialog('\n'.join(['Time out waiting for \'hsdev\' process to terminate.',
'',
'You may have to kill this process manually from a terminal or',
'console window\'s command line.']))
def is_live_backend(self):
return self.client.is_connected()
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# File/project tracking functions:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    ## Pylint deems these two methods unnecessary since all they do is call the superclass. However, I'm
## leaving them here just in case something more interesting has to be done in addition to calling
## the superclass.
# def add_project_file(self, filename, project, project_dir):
# super().add_project_file(filename, project, project_dir)
# def remove_project_file(self, filename):
# super().remove_project_file(filename)
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# Features
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def auto_rescan(self):
return True
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# Utility functions used to implement the API:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
@staticmethod
def hsdev_version(exec_with, install_dir, output_compiler_version=False):
retval = [0, 0, 0, 0]
compiler_version = None
cmd = ['hsdev', 'version']
if output_compiler_version:
cmd.append('-c')
hsdev_proc = ProcHelper.exec_with_wrapper(exec_with, install_dir, cmd)
if hsdev_proc.process is not None:
exit_code, out, _ = hsdev_proc.wait()
if exit_code == 0:
## 'cabal new-run' can spit out multiple lines of status before executing the task:
for line in out.splitlines():
hsver = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<revision>\d+)\.(?P<build>\d+)', line)
if hsver:
major = int(hsver.group('major'))
minor = int(hsver.group('minor'))
revision = int(hsver.group('revision'))
build = int(hsver.group('build'))
retval = [major, minor, revision, build]
compiler_version = line.split()[1] if output_compiler_version else None
break
return (retval, compiler_version) if output_compiler_version else retval
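    # For example (illustrative only), a version output line "0.3.3.1" is parsed
    # into [0, 3, 3, 1]; when output_compiler_version is requested, the compiler
    # version is assumed to be the second whitespace-separated token on that line.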
@staticmethod
def concat_args(args):
def inner_concat(left, right):
(left_pred, left_expr) = left
(right_pred, right_expr) = right
return (left_pred or right_pred, (left_expr if left_pred else []) + (right_expr if right_pred else []))
return reduce(inner_concat, args, (True, []))[1]
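    # Sketch of how concat_args collapses its (condition, arg-list) pairs; the
    # values below are made up for illustration:
    #
    #   concat_args([(True, ['hsdev', 'run']),
    #                (False, ['--port', '4567']),
    #                (True, ['--no-color'])])
    #   == ['hsdev', 'run', '--no-color']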
def files_and_contents(self, files, contents):
contents = contents or {}
retval = [{'file': f, 'contents': contents.get(f)} for f in files] if files else []
return retval
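    # For instance (illustrative file name), files_and_contents(['Main.hs'], None)
    # evaluates to [{'file': 'Main.hs', 'contents': None}]; a contents dict keyed
    # by file path fills in the 'contents' field where available.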
def make_callbacks(self, name, on_response=None, result_convert=result_identity, on_notify=None, on_error=None,
**backend_args):
with self.serial_lock:
req_serial = str(self.request_serial)
self.request_serial += 1
# Clean up backend arguments:
for param in ['on_response', 'result_convert', 'on_notify', 'on_error']:
if param in backend_args:
del backend_args[param]
return (HsCallback.HsDevCallbacks(req_serial, name, on_response, result_convert, on_notify, on_error), backend_args)
def hsdev_command(self, name, opts, callbacks, async_cmd=False, timeout=HSDEV_CALL_TIMEOUT, is_list=False,
on_result_part=None, split_result=None):
        if split_result is None:
            split_res = on_result_part is not None
        else:
            split_res = split_result
        if is_list and split_res:
result = []
def hsdev_command_notify(reply):
if 'result-part' in reply:
notify_result = callbacks.call_result_convert([reply['result-part']])[0]
on_result_part(notify_result)
result.append(notify_result)
else:
callbacks.call_notify(reply)
# FIXME: Is this option still used?
opts.update({'split-result': None})
callbacks.add_notify(hsdev_command_notify)
resp = self.client.call(name, opts, callbacks, wait=not async_cmd, timeout=timeout)
return resp
def command(self, name, opts, callbacks, timeout=HSDEV_CALL_TIMEOUT, on_result_part=None, split_result=None):
return self.hsdev_command(name, opts, callbacks, async_cmd=False, timeout=timeout, is_list=False,
on_result_part=on_result_part, split_result=split_result)
def async_command(self, name, opts, callbacks, on_result_part=None, split_result=None):
return self.hsdev_command(name, opts, callbacks, async_cmd=True, timeout=None, is_list=False,
on_result_part=on_result_part, split_result=split_result)
def list_command(self, name, opts, callbacks, timeout=HSDEV_CALL_TIMEOUT, on_result_part=None, split_result=None):
return self.hsdev_command(name, opts, callbacks, async_cmd=False, timeout=timeout, is_list=True,
on_result_part=on_result_part, split_result=split_result)
def async_list_command(self, name, opts, callbacks, on_result_part=None, split_result=None):
return self.hsdev_command(name, opts, callbacks, async_cmd=True, timeout=None, is_list=True,
on_result_part=on_result_part, split_result=split_result)
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# API implementation:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def link(self, hold=False):
return self.command('link', {'hold': hold}, self.make_callbacks('link')[0])
    def ping(self):
        resp = self.command('ping', {}, self.make_callbacks('ping')[0])
        return resp and ('message' in resp) and (resp['message'] == 'pong')
def scan(self, cabal=False, sandboxes=None, projects=None, files=None, paths=None, ghc=None, contents=None,
docs=False, infer=False, wait_complete=False, **backend_args):
action = self.command if wait_complete else self.async_command
callbacks, backend_args = self.make_callbacks('scan', **backend_args)
return action('scan', {'projects': projects or [],
'cabal': cabal,
'sandboxes': sandboxes or [],
'files': self.files_and_contents(files, contents),
'paths': paths or [],
'ghc-opts': ghc or [],
'docs': docs,
'infer': infer},
callbacks, **backend_args)
def scan_project(self, project, build_tool=None, no_deps=False, wait_complete=False, **backend_args):
action = self.command if wait_complete else self.async_command
callbacks, backend_args = self.make_callbacks('scan project', **backend_args)
return action(
'scan project',
{
'project': project,
'build-tool': build_tool,
'scan-deps': not no_deps,
},
callbacks,
**backend_args
)
def scan_file(self, file, build_tool=None, no_project=False, no_deps=False, wait_complete=False, **backend_args):
action = self.command if wait_complete else self.async_command
callbacks, backend_args = self.make_callbacks('scan file', **backend_args)
return action(
'scan file',
{
'file': file,
'build-tool': build_tool,
'scan-project': not no_project,
'scan-deps': not no_deps,
},
callbacks,
**backend_args
)
def scan_package_dbs(self, package_dbs, wait_complete=False, **backend_args):
action = self.command if wait_complete else self.async_command
callbacks, backend_args = self.make_callbacks('scan package-dbs', **backend_args)
return action(
'scan package-dbs',
{'package-db-stack': [{'package-db': p} if p not in ['user-db', 'global-db'] else p for p in package_dbs]},
callbacks,
**backend_args
)
def set_file_contents(self, file, contents=None, **backend_args):
callbacks, backend_args = self.make_callbacks('set-file-contents', **backend_args)
return self.command('set-file-contents', {'file': file, 'contents': contents}, callbacks, **backend_args)
def docs(self, projects=None, files=None, **backend_args):
callbacks, backend_args = self.make_callbacks('docs', **backend_args)
return self.async_command('docs', {'projects': projects or [],
'files': files or []},
callbacks, **backend_args)
def infer(self, projects=None, files=None, **backend_args):
callbacks, backend_args = self.make_callbacks('infer', **backend_args)
return self.async_command('infer', {'projects': projects or [],
'files': files or []},
callbacks, **backend_args)
def remove(self, cabal=False, sandboxes=None, projects=None, files=None, **backend_args):
callbacks, backend_args = self.make_callbacks('remove', **backend_args)
return self.async_list_command('remove', {'projects': projects or [],
'cabal': cabal,
'sandboxes': sandboxes or [],
'files': files or []},
callbacks, **backend_args)
def remove_all(self, **backend_args):
callbacks, backend_args = self.make_callbacks('remove-all', **backend_args)
return self.command('remove-all', {}, callbacks, **backend_args)
def list_packages(self, **backend_args):
callbacks, backend_args = self.make_callbacks('packages', **backend_args)
return self.list_command('packages', {}, callbacks, **backend_args)
def list_projects(self, **backend_args):
callbacks, backend_args = self.make_callbacks('projects', **backend_args)
return self.list_command('projects', {}, callbacks, **backend_args)
def list_sandboxes(self, **backend_args):
return self.list_command('sandboxes', {}, **backend_args)
    def symbol(self, lookup="", search_type='prefix', project=None, file=None, module=None, package=None,
               installed=False, source=False, standalone=False, local_names=False, header=False, **backend_args):
# search_type is one of: exact, prefix, infix, suffix
query = {'input': lookup, 'type': search_type}
filters = []
if project:
filters.append({'project': project})
if file:
filters.append({'file': file})
if module:
filters.append({'module': module})
if package:
filters.append({'package': package})
if installed:
filters.append('installed')
if source:
filters.append('sourced')
if standalone:
filters.append('standalone')
callbacks, backend_args = self.make_callbacks('symbol', result_convert=ResultParse.parse_symbol_ids if header else ResultParse.parse_symbols, **backend_args)
return self.list_command('symbol', {'query': query, 'filters': filters, 'locals': local_names, 'header': header},
callbacks, **backend_args)
    def module(self, _projectname, lookup="", search_type='prefix', project=None, file=None, module=None,
               package=None, installed=False, source=False, standalone=False, header=False, **backend_args):
query = {'input': lookup, 'type': search_type}
filters = []
if project:
filters.append({'project': project})
if file:
filters.append({'file': file})
if module:
filters.append({'module': module})
if package:
filters.append({'package': package})
if installed:
filters.append('installed')
if source:
filters.append('sourced')
if standalone:
filters.append('standalone')
callbacks, backend_args = self.make_callbacks('module', result_convert=ResultParse.parse_module_ids if header else ResultParse.parse_modules, **backend_args)
return self.command('module', {'query': query, 'filters': filters, 'header': header, 'inspection': False},
callbacks, **backend_args)
def project(self, project=None, path=None, **backend_args):
callbacks, backend_args = self.make_callbacks('project', **backend_args)
return self.command('project', {'name': project} if project else {'path': path}, callbacks, **backend_args)
def sandbox(self, path, **backend_args):
callbacks, backend_args = self.make_callbacks('sandbox', **backend_args)
return self.command('sandbox', {'path': path}, callbacks, **backend_args)
def lookup(self, name, file, **backend_args):
callbacks, backend_args = self.make_callbacks('lookup', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('lookup', {'name': name, 'file': file}, callbacks, **backend_args)
def whois(self, name, file, **backend_args):
callbacks, backend_args = self.make_callbacks('whois', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('whois', {'name': name, 'file': file}, callbacks, **backend_args)
def whoat(self, line, column, file, **backend_args):
callbacks, backend_args = self.make_callbacks('whoat', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('whoat', {'line': line, 'column': column, 'file': file}, callbacks, **backend_args)
def scope_modules(self, _projcname, file, lookup='', search_type='prefix', **backend_args):
callbacks, backend_args = self.make_callbacks('scope_modules', result_convert=ResultParse.parse_module_ids,
**backend_args)
return self.list_command('scope modules', {'query': {'input': lookup, 'type': search_type}, 'file': file},
callbacks, **backend_args)
def scope(self, file, lookup='', search_type='prefix', global_scope=False, **backend_args):
callbacks, backend_args = self.make_callbacks('scope', result_convert=ResultParse.parse_symbol_ids, **backend_args)
return self.list_command('scope',
{'query': {'input': lookup,
'type': search_type
},
'file': file
}, callbacks, **backend_args)
def usages(self, line, column, file, **backend_args):
callbacks, backend_args = self.make_callbacks('usages', result_convert=ResultParse.parse_symbol_usages, **backend_args)
return self.list_command('usages', {'line': line, 'column': column, 'file': file}, callbacks, **backend_args)
def complete(self, sym, file, wide=False, **backend_args):
qname = sym.qualified_name() if sym.name is not None else sym.module + '.'
callbacks, backend_args = self.make_callbacks('complete', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('complete', {'prefix': qname, 'wide': wide, 'file': file},
callbacks, **backend_args)
def hayoo(self, query, page=None, pages=None, **backend_args):
callbacks, backend_args = self.make_callbacks('hayoo', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('hayoo', {'query': query, 'page': page or 0, 'pages': pages or 1},
callbacks, **backend_args)
def cabal_list(self, packages, **backend_args):
def convert_to_cabal_packages(pkg_list):
return [ResultParse.parse_cabal_package(pkg) for pkg in pkg_list] if pkg_list else None
callbacks, backend_args = self.make_callbacks('cabal list', result_convert=convert_to_cabal_packages, **backend_args)
return self.list_command('cabal list', {'packages': packages}, callbacks, **backend_args)
def unresolveds(self, files, **backend_args):
callbacks, backend_args = self.make_callbacks('unresolveds', **backend_args)
return self.list_command('unresolveds', {'files': files}, callbacks, **backend_args)
def lint(self, files=None, contents=None, hlint=None, wait_complete=False, **backend_args):
action = self.list_command if wait_complete else self.async_list_command
result_convert = backend_args.pop('result_convert', [])
if result_convert and not isinstance(result_convert, list):
result_convert = [result_convert]
result_convert.append(self.convert_warnings)
callbacks, backend_args = self.make_callbacks('lint', result_convert=result_convert, **backend_args)
return action('lint', {'files': self.files_and_contents(files, contents),
'lint-opts': hlint or []},
callbacks, **backend_args)
def check(self, files=None, contents=None, ghc=None, wait_complete=False, **backend_args):
action = self.list_command if wait_complete else self.async_list_command
callbacks, backend_args = self.make_callbacks('check', **backend_args)
return action('check', {'files': self.files_and_contents(files, contents),
'ghc-opts': ghc or []},
callbacks, **backend_args)
def check_lint(self, files=None, contents=None, ghc=None, hlint=None, wait_complete=False, **backend_args):
action = self.list_command if wait_complete else self.async_list_command
result_convert = backend_args.pop('result_convert', [])
if result_convert and not isinstance(result_convert, list):
result_convert = [result_convert]
result_convert.append(self.convert_warnings)
callbacks, backend_args = self.make_callbacks('check-lint', result_convert=result_convert, **backend_args)
return action('check-lint', {'files': self.files_and_contents(files, contents),
'ghc-opts': ghc or [],
'lint-opts': hlint or []},
callbacks, **backend_args)
def types(self, _projectname, file, _modulename, _line, _column, ghc_flags=None, contents=None, **backend_args):
callbacks, backend_args = self.make_callbacks('types', **backend_args)
return self.list_command('types', {'files': self.files_and_contents(file, contents),
'ghc-opts': ghc_flags or []},
callbacks, **backend_args)
def autofixes(self, messages, wait_complete=False, **backend_args):
callbacks, backend_args = self.make_callbacks('autofixes', result_convert=ResultParse.parse_corrections, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('autofixes', {'messages': messages}, callbacks, **backend_args)
def refactor(self, messages, rest=[], pure=True, wait_complete=False, **backend_args):
callbacks, backend_args = self.make_callbacks('refactor', result_convert=ResultParse.parse_corrections, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('refactor', {'messages': messages, 'rest': rest, 'pure': pure}, callbacks, **backend_args)
def rename(self, name, new_name, file, wait_complete=False, **backend_args):
callbacks, backend_args = self.make_callbacks('rename', result_convert=ResultParse.parse_corrections, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('rename', {'name': name, 'new-name': new_name, 'file': file}, callbacks, **backend_args)
def langs(self, _projectname, **backend_args):
callbacks, backend_args = self.make_callbacks('langs', **backend_args)
return self.command('langs', {}, callbacks, **backend_args)
def flags(self, _projectname, **backend_args):
callbacks, backend_args = self.make_callbacks('flags', **backend_args)
return self.command('flags', {}, callbacks, **backend_args)
def ghc_eval(self, exprs, file=None, source=None, wait_complete=False, **backend_args):
the_file = None
if file is not None:
the_file = {'file': file, 'contents': source}
callbacks, backend_args = self.make_callbacks('ghc eval', result_convert=ResultParse.parse_repl_results, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('ghc eval', {'exprs': exprs, 'file': the_file}, callbacks, **backend_args)
def ghc_type(self, exprs, file=None, source=None, wait_complete=False, **backend_args):
the_file = None
if file is not None:
the_file = {'file': file, 'contents': source}
callbacks, backend_args = self.make_callbacks('ghc type', result_convert=ResultParse.parse_repl_results, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('ghc type', {'exprs': exprs, 'file': the_file}, callbacks, **backend_args)
def stop_ghc(self, **backend_args):
callbacks, backend_args = self.make_callbacks('stop-ghc', **backend_args)
return self.command('stop-ghc', {}, callbacks, **backend_args)
def exit(self):
return self.command('exit', {}, self.make_callbacks('exit')[0])
# old names for compatibility
def autofix_show(self, messages, wait_complete=False, **backend_args):
return self.autofixes(messages, wait_complete=wait_complete, **backend_args)
def autofix_fix(self, messages, rest=[], pure=True, wait_complete=False, **backend_args):
return self.refactor(messages, rest=rest, pure=pure, wait_complete=wait_complete, **backend_args)
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# Advanced features:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def query_import(self, symname, filename):
if self.whois(symname, filename):
return (False, ['Symbol {0} already in scope'.format(symname)])
candidates = list(filter(
lambda c: c.imported_from is not None,
self.lookup(symname, filename),
))
return (True, candidates) if candidates else (False, ['Symbol {0} not found'.format(symname)])
def contents_to_module(self, file, contents):
self.set_file_contents(file, contents)
        m = Utils.head_of(self.module(None, file=file, header=True))
proj = self.project(path=m.location.project)
build_tool = proj['build-tool']
self.scan_file(file=file, build_tool=build_tool, wait_complete=True)
return Utils.head_of(self.module(None, file=file))
def clean_imports(self, filename):
cmd = ['hsclearimports', filename, '--max-import-list', '64']
hsclean_proc = ProcHelper.exec_with_wrapper(self.exec_with, self.install_dir, cmd)
if hsclean_proc.process is not None:
exit_code, result, err = hsclean_proc.wait()
if exit_code == 0:
return (True, result.splitlines())
return (False, err)
return (False, ['\'hscleanimports\' utility not found.'])
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# Utility functions:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def convert_warnings(self, messages):
for msg in messages:
if msg.get('level', '') == 'warning':
msg['level'] = 'hint'
return messages
class HsDevStartupReader(threading.Thread):
'''Separate thread object that reads the local `hsdev` server's `stdout` looking for the server's startup
message. The server's port number is parsed from the startup message and saved in the object's `hsdev_port`
attribute, just in case this differs from the default or requested port.
'''
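    # For example, a startup line such as "Server started at port 4567" (the exact
    # wording only needs to match the regex used in run() below) sets hsdev_port to
    # 4567 and releases any thread blocked in wait_startup().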
def __init__(self, fstdout):
super().__init__(name='hsdev startup reader')
self.stdout = fstdout
self.hsdev_port = -1
self.end_event = threading.Event()
def run(self):
self.end_event.clear()
while not self.end_event.is_set():
srvout = self.stdout.readline().strip()
Logging.log('hsdev initial: {0}'.format(srvout), Logging.LOG_DEBUG)
if srvout != '':
start_confirm = re.search(r'[Ss]erver started at port (?P<port>\d+)$', srvout)
if start_confirm:
self.hsdev_port = int(start_confirm.group('port'))
Logging.log('hsdev initial: \'hsdev\' server started at port {0}'.format(self.hsdev_port))
self.end_event.set()
else:
# Got EOF, stop loop.
self.end_event.set()
def wait_startup(self, tmo):
self.end_event.wait(tmo)
def successful(self):
return self.end_event.is_set()
def stop(self):
self.end_event.clear()
def port(self):
return self.hsdev_port
| mit | -7,706,911,259,607,050,000 | 49.438957 | 204 | 0.571716 | false |
RCOS-Grading-Server/HWserver | tests/e2e/base_testcase.py | 2 | 10499 | import shutil
import tempfile
from datetime import date
import os
import unittest
import json
from urllib.parse import urlencode
from urllib.parse import urlparse
from selenium import webdriver
from websocket import create_connection
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import sys
# explicitly add this import path, so we can run it on a local host
sys.path.append('../python_submitty_utils/')
from submitty_utils import dateutils
# noinspection PyPep8Naming
class BaseTestCase(unittest.TestCase):
"""
Base class that all e2e tests should extend. It provides several useful
helper functions, sets up the selenium webdriver, and provides a common
interface for logging in/out a user. Each test then only really needs to
override user_id, user_name, and user_password as necessary for a
particular testcase and this class will handle the rest to setup the test.
"""
TEST_URL = "http://localhost:1501"
USER_ID = "student"
USER_NAME = "Joe"
USER_PASSWORD = "student"
WAIT_TIME = 20
def __init__(self, testname, user_id=None, user_password=None, user_name=None, log_in=True, use_websockets=False, socket_page=''):
super().__init__(testname)
if "TEST_URL" in os.environ and os.environ['TEST_URL'] is not None:
self.test_url = os.environ['TEST_URL']
else:
self.test_url = BaseTestCase.TEST_URL
self.driver = None
""" :type driver: webdriver.Chrome """
self.options = webdriver.ChromeOptions()
self.options.add_argument('--no-sandbox')
self.options.add_argument('--headless')
self.options.add_argument("--disable-extensions")
self.options.add_argument('--hide-scrollbars')
self.options.add_argument('--disable-gpu')
self.options.add_argument('--no-proxy-server')
self.download_dir = tempfile.mkdtemp(prefix="vagrant-submitty")
# https://stackoverflow.com/a/26916386/214063
profile = {
'download.prompt_for_download': False,
'download.default_directory': self.download_dir,
'download.directory_upgrade': True,
'plugins.plugins_disabled': ['Chrome PDF Viewer']
}
self.options.add_experimental_option('prefs', profile)
self.user_id = user_id if user_id is not None else BaseTestCase.USER_ID
self.user_name = user_name if user_name is not None else BaseTestCase.USER_NAME
if user_password is None and user_id is not None:
user_password = user_id
self.user_password = user_password if user_password is not None else BaseTestCase.USER_PASSWORD
self.semester = dateutils.get_current_semester()
self.full_semester = BaseTestCase.get_display_semester(self.semester)
self.logged_in = False
self.use_log_in = log_in
self.use_websockets = use_websockets
self.socket_page = socket_page
def setUp(self):
# attempt to set-up the connection to Chrome. Repeat a handful of times
# in-case Chrome crashes during initialization
for _ in range(5):
try:
self.driver = webdriver.Chrome(options=self.options)
break
except WebDriverException:
pass
if self.driver is None:
self.driver = webdriver.Chrome(options=self.options)
self.driver.set_window_size(1600, 900)
self.enable_download_in_headless_chrome(self.download_dir)
if self.use_log_in:
self.log_in()
if self.use_websockets:
self.enable_websockets()
def tearDown(self):
self.driver.quit()
shutil.rmtree(self.download_dir)
if self.use_websockets:
self.ws.close()
def get(self, url=None, parts=None):
if url is None:
# Can specify parts = [('semester', 's18'), ...]
self.assertIsNotNone(parts)
url = "/index.php?" + urlencode(parts)
if url[0] != "/":
url = "/" + url
self.driver.get(self.test_url + url)
# Frog robot
self.assertNotEqual(self.driver.title, "Submitty - Error", "Got Error Page")
def log_in(self, url=None, title="Submitty", user_id=None, user_password=None, user_name=None):
"""
Provides a common function for logging into the site (and ensuring
that we're logged in)
:return:
"""
if url is None:
url = "/index.php"
if user_password is None:
user_password = user_id if user_id is not None else self.user_password
if user_id is None:
user_id = self.user_id
if user_name is None:
user_name = self.user_name
self.get(url)
# print(self.driver.page_source)
self.driver.find_element(By.NAME, 'user_id').send_keys(user_id)
self.driver.find_element(By.NAME, 'password').send_keys(user_password)
self.driver.find_element(By.NAME, 'login').click()
# OLD self.assertEqual(user_name, self.driver.find_element(By.ID, "login-id").get_attribute('innerText').strip(' \t\r\n'))
# FIXME: WANT SOMETHING LIKE THIS... WHEN WE HAVE JUST ONE ELEMENT WITH THIS ID
# self.assertEqual("Logout "+user_name, self.driver.find_element(By.ID, "logout").get_attribute('innerText').strip(' \t\r\n'))
# instead, just make sure this element exists
self.driver.find_element(By.ID, "logout")
self.logged_in = True
def log_out(self):
if self.logged_in:
self.logged_in = False
self.driver.find_element(By.ID, 'logout').click()
self.driver.find_element(By.ID, 'login-guest')
def click_class(self, course, course_name=None):
if course_name is None:
course_name = course
course_name = course_name.title()
self.driver.find_element(By.ID, dateutils.get_current_semester() + '_' + course).click()
# print(self.driver.page_source)
WebDriverWait(self.driver, BaseTestCase.WAIT_TIME).until(EC.title_is('Gradeables - ' + course_name))
# see Navigation.twig for html attributes to use as arguments
# loaded_selector must recognize an element on the page being loaded (test_simple_grader.py has xpath example)
def click_nav_grade_button(self, gradeable_category, gradeable_id, button_name, loaded_selector):
self.driver.find_element(By.XPATH,
"//div[@id='{}']/div[@class='course-button']/a[contains(@class, 'btn-nav-grade')]".format(
gradeable_id)).click()
WebDriverWait(self.driver, BaseTestCase.WAIT_TIME).until(EC.presence_of_element_located(loaded_selector))
def click_nav_submit_button(self, gradeable_category, gradeable_id, button_name, loaded_selector):
self.driver.find_element(By.XPATH,
"//div[@id='{}']/div[@class='course-button']/a[contains(@class, 'btn-nav-submit')]".format(
gradeable_id)).click()
WebDriverWait(self.driver, BaseTestCase.WAIT_TIME).until(EC.presence_of_element_located(loaded_selector))
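    # Illustrative call (gradeable id and selector below are hypothetical):
    #   self.click_nav_submit_button('open', 'open_homework', 'submit',
    #                                (By.ID, 'submission-page'))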
# clicks the navigation header text to 'go back' pages
# for homepage, selector can be gradeable list
def click_header_link_text(self, text, loaded_selector):
self.driver.find_element(
By.XPATH,
"//div[@id='breadcrumbs']/div[@class='breadcrumb']/a[text()='{}']".format(text)
).click()
WebDriverWait(self.driver, BaseTestCase.WAIT_TIME).until(EC.presence_of_element_located(loaded_selector))
def wait_after_ajax(self):
WebDriverWait(self.driver, 10).until(lambda driver: driver.execute_script("return jQuery.active == 0"))
def wait_for_element(self, element_selector, visibility=True, timeout=WAIT_TIME):
"""
Waits for an element to be present in the DOM. By default, also waits for the element to be
visible/interactable
"""
if visibility:
WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located(element_selector))
else:
WebDriverWait(self.driver, timeout).until(EC.presence_of_element_located(element_selector))
@staticmethod
def wait_user_input():
"""
Causes the running selenium test to pause until the user has hit the enter key in the
        terminal that is running python. This is useful in the middle of building tests,
        as you can then use the javascript console to inspect the page, get the name/id of
        elements, or perform other such actions, and then use that to continue building the test.
"""
input("Hit enter to continue...")
@staticmethod
def get_display_semester(current_semester):
s = 'Fall' if current_semester[0] == 'f' else 'Summer' if current_semester[0] == 'u' else 'Spring'
s += ' 20' + current_semester[1:]
return s
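    # e.g. get_display_semester("f18") == "Fall 2018",
    #      get_display_semester("u21") == "Summer 2021",
    #      get_display_semester("s19") == "Spring 2019"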
# https://stackoverflow.com/a/47366981/214063
def enable_download_in_headless_chrome(self, download_dir):
# add missing support for chrome "send_command" to selenium webdriver
self.driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_dir}}
self.driver.execute("send_command", params)
def enable_websockets(self):
submitty_session_cookie = self.driver.get_cookie('submitty_session')
address = self.test_url.replace('http', 'ws') + '/ws'
parsed = urlparse(address)
netloc = parsed.netloc
if ':' in netloc:
netloc = netloc.split(':', 1)[0]
netloc += ':8443'
address = parsed._replace(netloc=netloc).geturl()
self.ws = create_connection(address, cookie = submitty_session_cookie['name'] +'='+ submitty_session_cookie['value'], header={"User-Agent": "python-socket-client"})
new_connection_msg = json.dumps({'type': 'new_connection', 'page': self.semester + '-sample-' + self.socket_page})
self.ws.send(new_connection_msg)
def check_socket_message(self, message):
ws_msg = json.loads(self.ws.recv())
self.assertIn('type', ws_msg.keys())
self.assertEqual(ws_msg['type'], message)
| bsd-3-clause | -5,955,919,036,682,299,000 | 42.384298 | 172 | 0.645109 | false |
afsungur/MemWord | framefinish.py | 1 | 2063 | import wx
from griddict import GridDictionary
import Global
class FrameFinish(wx.Frame):
def __init__(self, parent, true_count, false_count, falses):
FRAME_SIZE_WIDTH = 800
FRAME_SIZE_HEIGHT = 300
FRAME_POS_X = 200
FRAME_POS_Y = 200
wx.Frame.__init__(self, parent, -1,
title=Global.FINISH_TITLE,
size=(FRAME_SIZE_WIDTH, FRAME_SIZE_HEIGHT),
pos=(FRAME_POS_X, FRAME_POS_Y),
style=wx.DEFAULT_FRAME_STYLE)
self.frame = parent
# Text Items
true_count_text = wx.StaticText(self, -1, Global.TRUE_COUNT_TEXT)
false_count_text = wx.StaticText(self, -1, Global.FALSE_COUNT_TEXT)
true_count_value = wx.StaticText(self, -1, str(true_count))
false_count_value = wx.StaticText(self, -1, str(false_count))
seperator = wx.StaticText(self, -1, "-----------------------------")
font = wx.Font(16, wx.MODERN, wx.NORMAL, wx.BOLD)
falses_big_text = wx.StaticText(self, -1, Global.WRONG_ANSWERS_TEXT+":")
falses_big_text.SetFont(font)
# Grid
grid_falses = GridDictionary(self, falses)
print "false count:", len(falses)
# Sizer Set
trueCountSizer = wx.GridBagSizer(2,2)
trueCountSizer.Add(true_count_text,pos=(0,0))
trueCountSizer.Add(true_count_value,pos=(0,1))
trueCountSizer.Add(false_count_text,pos=(1,0))
trueCountSizer.Add(false_count_value,pos=(1,1))
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(trueCountSizer, 0, wx.ALL, 5)
mainSizer.Add(seperator,0, wx.ALL, 5)
mainSizer.Add(falses_big_text,0, wx.ALL, 5)
mainSizer.Add(grid_falses, 0, wx.ALL, 5)
# Bind
self.Bind(wx.EVT_CLOSE, self.close_event)
# Frame Settings
self.SetSizer(mainSizer)
self.Fit()
self.Show()
def close_event(self, evt):
print "closed..."
self.frame.close()
| gpl-3.0 | -5,161,963,566,746,056,000 | 33.383333 | 80 | 0.56762 | false |
davidcdba/oBid | oBid/oBid/settings.py | 1 | 6054 | #encoding: utf-8
# So that accents and Ñ don't cause problems
# Django settings for oBid project.
## EXPLANATION ## Import the system 'os' library and set the project PATH to the folder where this file is located
import os
PROJECT_PATH=os.path.dirname(os.path.realpath(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('i12gamad', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'oBid.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# You can see which time zones are available here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
## EXPLANATION ## Set the time zone to 'Europe/Madrid' to avoid time changes
TIME_ZONE = 'Europe/Madrid'
## EXPLANATION ## Set the language we will use to Spanish (Spain)
LANGUAGE_CODE = 'es-es'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
## EXPLANATION ## Set the project's 'media' folder as the folder containing the multimedia content
MEDIA_ROOT = os.path.join(PROJECT_PATH,'media')
## EXPLANATION ## Set the path 'localhost:8000/media/' as the access URL for the multimedia content folder
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
    ## EXPLANATION ## Set the project's 'static' folder as the folder containing the static content
os.path.join(PROJECT_PATH,'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
## NOTE ## Line uncommented, enables the path to static content
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '##d-1%bpw32#q*_#q6e)07_n01$qy!s&9mx6_2yh4p6)gv^^p&'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'oBid.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'oBid.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
## EXPLANATION ## Sets the 'templates' folder inside the project as the folder holding the templates
os.path.join(PROJECT_PATH,'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
    'django.contrib.admin', ## NOTE ## Line uncommented, enables access to the administration panel
    ## NOTE ## Line uncommented, enables access to the administration panel documentation
'django.contrib.admindocs',
    # add the 'subasta' (auction) application
'subasta',
'usuarios',
'articulos',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| gpl-2.0 | 8,352,776,375,811,243,000 | 35.896341 | 129 | 0.691786 | false |
mblaauw/pre-publish-predictor | alchemyapi_python/tests.py | 1 | 7384 | #!/usr/bin/env python
# Copyright 2013 AlchemyAPI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from alchemyapi import AlchemyAPI
test_text = 'Bob broke my heart, and then made up this silly sentence to test the PHP SDK'
test_html = '<html><head><title>The best SDK Test | AlchemyAPI</title></head><body><h1>Hello World!</h1><p>My favorite language is PHP</p></body></html>'
test_url = 'http://www.nytimes.com/2013/07/13/us/politics/a-day-of-friction-notable-even-for-a-fractious-congress.html?_r=0'
alchemyapi = AlchemyAPI()
#Entities
print('Checking entities . . . ')
response = alchemyapi.entities('text', test_text);
assert(response['status'] == 'OK')
response = alchemyapi.entities('html', test_html);
assert(response['status'] == 'OK')
response = alchemyapi.entities('url', test_url);
assert(response['status'] == 'OK')
response = alchemyapi.entities('random', test_url);
assert(response['status'] == 'ERROR') #invalid flavor
print('Entity tests complete!')
print('')
#Keywords
print('Checking keywords . . . ')
response = alchemyapi.keywords('text', test_text);
assert(response['status'] == 'OK')
response = alchemyapi.keywords('html', test_html);
assert(response['status'] == 'OK')
response = alchemyapi.keywords('url', test_url);
assert(response['status'] == 'OK')
response = alchemyapi.keywords('random', test_url);
assert(response['status'] == 'ERROR') #invalid flavor
print('Keyword tests complete!')
print('')
#Concepts
print('Checking concepts . . . ')
response = alchemyapi.concepts('text', test_text);
assert(response['status'] == 'OK')
response = alchemyapi.concepts('html', test_html);
assert(response['status'] == 'OK')
response = alchemyapi.concepts('url', test_url);
assert(response['status'] == 'OK')
response = alchemyapi.concepts('random', test_url);
assert(response['status'] == 'ERROR') #invalid flavor
print('Concept tests complete!')
print('')
#Sentiment
print('Checking sentiment . . . ')
response = alchemyapi.sentiment('text', test_text);
assert(response['status'] == 'OK')
response = alchemyapi.sentiment('html', test_html);
assert(response['status'] == 'OK')
response = alchemyapi.sentiment('url', test_url);
assert(response['status'] == 'OK')
response = alchemyapi.sentiment('random', test_url);
assert(response['status'] == 'ERROR') #invalid flavor
print('Sentiment tests complete!')
print('')
#Targeted Sentiment
print('Checking targeted sentiment . . . ')
response = alchemyapi.sentiment_targeted('text', test_text, 'heart');
assert(response['status'] == 'OK')
response = alchemyapi.sentiment_targeted('html', test_html, 'language');
assert(response['status'] == 'OK')
response = alchemyapi.sentiment_targeted('url', test_url, 'Congress');
assert(response['status'] == 'OK')
response = alchemyapi.sentiment_targeted('random', test_url, 'Congress');
assert(response['status'] == 'ERROR') #invalid flavor
response = alchemyapi.sentiment_targeted('text', test_text, None);
assert(response['status'] == 'ERROR') #missing target
print('Targeted sentiment tests complete!')
print('')
#Text
print('Checking text . . . ')
response = alchemyapi.text('text', test_text);
assert(response['status'] == 'ERROR') #only works for html and url content
response = alchemyapi.text('html', test_html);
assert(response['status'] == 'OK')
response = alchemyapi.text('url', test_url);
assert(response['status'] == 'OK')
print('Text tests complete!')
print('')
#Text Raw
print('Checking raw text . . . ')
response = alchemyapi.text_raw('text', test_text);
assert(response['status'] == 'ERROR') #only works for html and url content
response = alchemyapi.text_raw('html', test_html);
assert(response['status'] == 'OK')
response = alchemyapi.text_raw('url', test_url);
assert(response['status'] == 'OK')
print('Raw text tests complete!')
print('')
#Author
print('Checking author . . . ')
response = alchemyapi.author('text', test_text);
assert(response['status'] == 'ERROR') #only works for html and url content
response = alchemyapi.author('html', test_html);
assert(response['status'] == 'ERROR') #there's no author in the test HTML
response = alchemyapi.author('url', test_url);
assert(response['status'] == 'OK')
print('Author tests complete!')
print('')
#Language
print('Checking language . . . ')
response = alchemyapi.language('text', test_text);
assert(response['status'] == 'OK')
response = alchemyapi.language('html', test_html);
assert(response['status'] == 'OK')
response = alchemyapi.language('url', test_url);
assert(response['status'] == 'OK')
response = alchemyapi.language('random', test_url);
assert(response['status'] == 'ERROR') #invalid flavor
print('Language tests complete!')
print('')
#Title
print('Checking title . . . ')
response = alchemyapi.title('text', test_text);
assert(response['status'] == 'ERROR') #only works for html and url content
response = alchemyapi.title('html', test_html);
assert(response['status'] == 'OK')
response = alchemyapi.title('url', test_url);
assert(response['status'] == 'OK')
print('Title tests complete!')
print('')
#Relations
print('Checking relations . . . ')
response = alchemyapi.relations('text', test_text);
assert(response['status'] == 'OK')
response = alchemyapi.relations('html', test_html);
assert(response['status'] == 'OK')
response = alchemyapi.relations('url', test_url);
assert(response['status'] == 'OK')
response = alchemyapi.relations('random', test_url);
assert(response['status'] == 'ERROR') #invalid flavor
print('Relation tests complete!')
print('')
#Category
print('Checking category . . . ')
response = alchemyapi.category('text', test_text);
assert(response['status'] == 'OK')
response = alchemyapi.category('html', test_html, {'url':'test'});
assert(response['status'] == 'OK')
response = alchemyapi.category('url', test_url);
assert(response['status'] == 'OK')
response = alchemyapi.category('random', test_url);
assert(response['status'] == 'ERROR') #invalid flavor
print('Category tests complete!')
print('')
#Feeds
print('Checking feeds . . . ')
response = alchemyapi.feeds('text', test_text);
assert(response['status'] == 'ERROR') #only works for html and url content
response = alchemyapi.feeds('html', test_html, {'url':'test'});
assert(response['status'] == 'OK')
response = alchemyapi.feeds('url', test_url);
assert(response['status'] == 'OK')
print('Feed tests complete!')
print('')
#Microformats
print('Checking microformats . . . ')
response = alchemyapi.microformats('text', test_text);
assert(response['status'] == 'ERROR') #only works for html and url content
response = alchemyapi.microformats('html', test_html, {'url':'test'});
assert(response['status'] == 'OK')
response = alchemyapi.microformats('url', test_url);
assert(response['status'] == 'OK')
print('Microformat tests complete!')
print('')
print('')
print('**** All tests complete! ****')
| mit | -8,312,624,307,603,858,000 | 30.555556 | 153 | 0.697589 | false |
yarikoptic/NiPy-OLD | examples/interfaces/process_fiac.py | 1 | 6055 | ''' Single subject analysis script for SPM / FIAC '''
import sys
from os.path import join as pjoin
from glob import glob
import numpy as np
from nipy.interfaces.spm import spm_info, make_job, scans_for_fnames, \
run_jobdef, fnames_presuffix, fname_presuffix, fltcols
def get_data(data_path, subj_id):
data_def = {}
subject_path = pjoin(data_path, 'fiac%s' % subj_id)
data_def['functionals'] = sorted(
glob(pjoin(subject_path, 'functional_*.nii')))
anatomicals = glob(pjoin(subject_path, 'anatomical.nii'))
if len(anatomicals) == 1:
data_def['anatomical'] = anatomicals[0]
elif len(anatomicals) == 0:
data_def['anatomical'] = None
else:
raise ValueError('Too many anatomicals')
return data_def
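# Slice-timing note: the 'so' parameter below encodes an interleaved acquisition
# order for the 30 slices (odd-numbered slices 1,3,...,29 acquired first, then the
# even slices 2,4,...,30), with TR = 2.5 s and slice 1 used as the reference.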
def slicetime(data_def):
sess_scans = scans_for_fnames(data_def['functionals'])
stinfo = make_job('temporal', 'st', {
'scans': sess_scans,
'so':range(1,31,2) + range(2,31,2),
'tr':2.5,
'ta':2.407,
'nslices':float(30),
'refslice':1
})
run_jobdef(stinfo)
def realign(data_def):
sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a'))
rinfo = make_job('spatial', 'realign', [{
'estimate':{
'data':sess_scans,
'eoptions':{
'quality':0.9,
'sep':4.0,
'fwhm':5.0,
'rtm':True,
'interp':2.0,
'wrap':[0.0,0.0,0.0],
'weight':[]
}
}
}])
run_jobdef(rinfo)
def reslice(data_def):
sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a'))
rsinfo = make_job('spatial', 'realign', [{
'write':{
'data': np.vstack(sess_scans.flat),
'roptions':{
'which':[2, 1],
'interp':4.0,
'wrap':[0.0,0.0,0.0],
'mask':True,
}
}
}])
run_jobdef(rsinfo)
def coregister(data_def):
func1 = data_def['functionals'][0]
mean_fname = fname_presuffix(func1, 'meana')
crinfo = make_job('spatial', 'coreg', [{
'estimate':{
'ref': [mean_fname],
'source': [data_def['anatomical']],
'other': [[]],
'eoptions':{
'cost_fun':'nmi',
'sep':[4.0, 2.0],
'tol':np.array(
[0.02,0.02,0.02,
0.001,0.001,0.001,
0.01,0.01,0.01,
0.001,0.001,0.001]).reshape(1,12),
'fwhm':[7.0, 7.0]
}
}
}])
run_jobdef(crinfo)
def segnorm(data_def):
def_tpms = np.zeros((3,1), dtype=np.object)
spm_path = spm_info.spm_path
def_tpms[0] = pjoin(spm_path, 'tpm', 'grey.nii'),
def_tpms[1] = pjoin(spm_path, 'tpm', 'white.nii'),
def_tpms[2] = pjoin(spm_path, 'tpm', 'csf.nii')
data = np.zeros((1,), dtype=object)
data[0] = data_def['anatomical']
sninfo = make_job('spatial', 'preproc', {
'data': data,
'output':{
'GM':fltcols([0,0,1]),
'WM':fltcols([0,0,1]),
'CSF':fltcols([0,0,0]),
'biascor':1.0,
'cleanup':False,
},
'opts':{
'tpm':def_tpms,
'ngaus':fltcols([2,2,2,4]),
'regtype':'mni',
'warpreg':1.0,
'warpco':25.0,
'biasreg':0.0001,
'biasfwhm':60.0,
'samp':3.0,
'msk':np.array([], dtype=object),
}
})
run_jobdef(sninfo)
def norm_write(data_def):
sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a'))
matname = fname_presuffix(data_def['anatomical'],
suffix='_seg_sn.mat',
use_ext=False)
subj = {
'matname': np.zeros((1,), dtype=object),
'resample': np.vstack(sess_scans.flat),
}
subj['matname'][0] = matname
roptions = {
'preserve':False,
'bb':np.array([[-78,-112, -50],[78,76,85.0]]),
'vox':fltcols([2.0,2.0,2.0]),
'interp':1.0,
'wrap':[0.0,0.0,0.0],
}
nwinfo = make_job('spatial', 'normalise', [{
'write':{
'subj': subj,
'roptions': roptions,
}
}])
run_jobdef(nwinfo)
# knock out the list of images, replacing with only one
subj['resample'] = np.zeros((1,), dtype=object)
subj['resample'][0] = data_def['anatomical']
roptions['interp'] = 4.0
run_jobdef(nwinfo)
def smooth(data_def, fwhm=8.0):
try:
len(fwhm)
except TypeError:
fwhm = [fwhm] * 3
fwhm = np.asarray(fwhm, dtype=np.float).reshape(1,3)
sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'wa'))
sinfo = make_job('spatial', 'smooth',
{'data':np.vstack(sess_scans.flat),
'fwhm':fwhm,
'dtype':0})
run_jobdef(sinfo)
def process_subject(ddef):
if not ddef['anatomical']:
return
slicetime(ddef)
realign(ddef)
reslice(ddef)
coregister(ddef)
segnorm(ddef)
norm_write(ddef)
smooth(ddef)
def process_subjects(data_path, subj_ids):
for subj_id in subj_ids:
ddef = get_data(data_path, subj_id)
process_subject(ddef)
if __name__ == '__main__':
try:
data_path = sys.argv[1]
except IndexError:
raise OSError('Need FIAC data path as input')
try:
subj_ids = sys.argv[2:]
except IndexError:
subj_ids = range(16)
process_subjects(data_path, subj_ids)
| bsd-3-clause | -6,172,351,069,423,954,000 | 29.124378 | 82 | 0.471181 | false |
xesscorp/skidl | skidl/bus.py | 1 | 16133 | # -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2018 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Handles buses.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import range, str, super
from future import standard_library
from .alias import Alias
from .common import *
from .defines import *
from .logger import logger
from .net import Net
from .netpinlist import NetPinList
from .pin import Pin
from .skidlbaseobj import SkidlBaseObject
from .utilities import *
standard_library.install_aliases()
class Bus(SkidlBaseObject):
"""
This class collects one or more nets into a group that can be indexed.
Args:
name: A string with the name of the bus.
args: A list of ints, pins, nets, buses to attach to the net.
Keyword Args:
attribs: A dictionary of attributes and values to attach to
the Net object.
Example:
::
n = Net()
led1 = Part("Device", 'LED')
b = Bus('B', 8, n, led1['K'])
"""
@classmethod
def get(cls, name, circuit=None):
"""Get the bus with the given name from a circuit, or return None."""
if not circuit:
circuit = builtins.default_circuit
search_params = (
("name", name, True),
("aliases", name, True),
# ('name', ''.join(('.*',name,'.*')), False),
# ('aliases', Alias(''.join(('.*',name,'.*'))), False)
)
for attr, name, do_str_match in search_params:
buses = filter_list(
circuit.buses, do_str_match=do_str_match, **{attr: name}
)
if buses:
return list_or_scalar(buses)
return None
@classmethod
def fetch(cls, name, *args, **attribs):
"""Get the bus with the given name from a circuit, or create it if not found."""
circuit = attribs.get("circuit", builtins.default_circuit)
return cls.get(name, circuit=circuit) or cls(name, *args, **attribs)
def __init__(self, *args, **attribs):
super().__init__()
# Define the member storing the nets so it's present, but it starts empty.
self.nets = []
# For Bus objects, the circuit object the bus is a member of is passed
# in with all the other attributes. If a circuit object isn't provided,
# then the default circuit object is added to the attributes.
attribs["circuit"] = attribs.get("circuit", default_circuit)
# Scan through the kwargs and args to see if there is a name for this bus.
name = attribs.pop("name", None)
if not name:
try:
# The first string found will be the bus name.
name = [a for a in args if isinstance(a, (basestring, type(None)))][0]
# Remove the name from the list of things to be added to the bus.
args = list(args)
args.remove(name)
# args = [a for a in args if a != name]
except IndexError:
# No explicit bus name found, so generate an implicit one.
name = None
# Attach additional attributes to the bus. (The Circuit object also gets
# set here.)
for k, v in list(attribs.items()):
setattr(self, k, v)
# The bus name is set after the circuit is assigned so the name can be
# checked against the other bus names already in that circuit.
self.name = name
# Add the bus to the circuit.
self.circuit = None # Make sure bus isn't seen as part of circuit.
attribs["circuit"] += self # Add bus to circuit (also sets self.circuit).
# Build the bus from net widths, existing nets, nets of pins, other buses.
self.extend(args)
def extend(self, *objects):
"""Extend bus by appending objects to the end (MSB)."""
self.insert(len(self.nets), objects)
def insert(self, index, *objects):
"""Insert objects into bus starting at indexed position."""
for obj in flatten(objects):
if isinstance(obj, int):
# Add a number of new nets to the bus.
for _ in range(obj):
self.nets.insert(index, Net())
index += obj
elif isinstance(obj, Net):
# Add an existing net to the bus.
self.nets.insert(index, obj)
index += 1
elif isinstance(obj, Pin):
# Add a pin to the bus.
try:
# Add the pin's net to the bus.
self.nets.insert(index, obj.get_nets()[0])
except IndexError:
# OK, the pin wasn't already connected to a net,
# so create a new net, add it to the bus, and
# connect the pin to it.
n = Net()
n += obj
self.nets.insert(index, n)
index += 1
elif isinstance(obj, Bus):
# Add an existing bus to this bus.
for n in reversed(obj.nets):
self.nets.insert(index, n)
index += len(obj)
else:
log_and_raise(
logger,
ValueError,
"Adding illegal type of object ({}) to Bus {}.".format(
type(obj), self.name
),
)
# Assign names to all the unnamed nets in the bus.
# Separate index from bus name if name ends with number.
sep = '_' if self.name[-1].isdigit() else ''
for i, net in enumerate(self.nets):
if net.is_implicit():
# Net names are the bus name with the index appended.
net.name = self.name + sep + str(i)
def get_nets(self):
"""Return the list of nets contained in this bus."""
return to_list(self.nets)
def get_pins(self):
"""It's an error to get the list of pins attached to all bus lines."""
log_and_raise(logger, TypeError, "Can't get the list of pins on a bus!")
def copy(self, num_copies=None, **attribs):
"""
Make zero or more copies of this bus.
Args:
num_copies: Number of copies to make of this bus.
Keyword Args:
attribs: Name/value pairs for setting attributes for the copy.
Returns:
A list of Bus copies or a Bus if num_copies==1.
Raises:
Exception if the requested number of copies is a non-integer or negative.
Notes:
An instance of a bus can be copied just by calling it like so::
b = Bus('A', 8) # Create a bus.
b_copy = b(2) # Get two copies of the bus.
You can also use the multiplication operator to make copies::
b = 10 * Bus('A', 8) # Create an array of buses.
"""
# If the number of copies is None, then a single copy will be made
# and returned as a scalar (not a list). Otherwise, the number of
# copies will be set by the num_copies parameter or the number of
# values supplied for each part attribute.
num_copies_attribs = find_num_copies(**attribs)
return_list = (num_copies is not None) or (num_copies_attribs > 1)
if num_copies is None:
num_copies = max(1, num_copies_attribs)
# Check that a valid number of copies is requested.
if not isinstance(num_copies, int):
log_and_raise(
logger,
ValueError,
"Can't make a non-integer number ({}) of copies of a bus!".format(
num_copies
),
)
if num_copies < 0:
log_and_raise(
logger,
ValueError,
"Can't make a negative number ({}) of copies of a bus!".format(
num_copies
),
)
copies = []
for i in range(num_copies):
cpy = Bus(self.name, self)
# Attach additional attributes to the bus.
for k, v in list(attribs.items()):
if isinstance(v, (list, tuple)):
try:
v = v[i]
except IndexError:
log_and_raise(
logger,
ValueError,
"{} copies of bus {} were requested, but too few elements in attribute {}!".format(
num_copies, self.name, k
),
)
setattr(cpy, k, v)
copies.append(cpy)
# Return a list of the copies made or just a single copy.
if return_list:
return copies
return copies[0]
# Make copies with the multiplication operator or by calling the object.
__call__ = copy
def __mul__(self, num_copies):
if num_copies is None:
num_copies = 0
return self.copy(num_copies=num_copies)
__rmul__ = __mul__
def __getitem__(self, *ids):
"""
Return a bus made up of the nets at the given indices.
Args:
ids: A list of indices of bus lines. These can be individual
numbers, net names, nested lists, or slices.
Returns:
A bus if the indices are valid, otherwise None.
"""
# Use the indices to get the nets from the bus.
nets = []
for ident in expand_indices(0, len(self) - 1, False, *ids):
if isinstance(ident, int):
nets.append(self.nets[ident])
elif isinstance(ident, basestring):
nets.extend(filter_list(self.nets, name=ident))
else:
log_and_raise(
logger, TypeError, "Can't index bus with a {}.".format(type(ident))
)
if len(nets) == 0:
# No nets were selected from the bus, so return None.
return None
if len(nets) == 1:
# Just one net selected, so return the Net object.
return nets[0]
# Multiple nets selected, so return them as a NetPinList list.
return NetPinList(nets)
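    # Illustrative indexing sketch (the bus and net names are assumptions made up
    # for this example, not taken from the code above):
    #
    #   b = Bus('CTRL', 4)   # creates nets CTRL0..CTRL3
    #   b[0]                 # a single Net object
    #   b[1, 3]              # a NetPinList holding two nets
    #   b['CTRL2']           # a bus line selected by its net name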
def __setitem__(self, ids, *pins_nets_buses):
"""
You can't assign to bus lines. You must use the += operator.
This method is a work-around that allows the use of the += for making
connections to bus lines while prohibiting direct assignment. Python
processes something like my_bus[7:0] += 8 * Pin() as follows::
1. Bus.__getitem__ is called with '7:0' as the index. This
returns a NetPinList of eight nets from my_bus.
2. The NetPinList.__iadd__ method is passed the NetPinList and
the thing to connect to the it (eight pins in this case). This
method makes the actual connection to the part pin or pins. Then
it creates an iadd_flag attribute in the object it returns.
3. Finally, Bus.__setitem__ is called. If the iadd_flag attribute
is true in the passed argument, then __setitem__ was entered
as part of processing the += operator. If there is no
iadd_flag attribute, then __setitem__ was entered as a result
of using a direct assignment, which is not allowed.
"""
# If the iadd_flag is set, then it's OK that we got
# here and don't issue an error. Also, delete the flag.
if getattr(pins_nets_buses[0], "iadd_flag", False):
del pins_nets_buses[0].iadd_flag
return
# No iadd_flag or it wasn't set. This means a direct assignment
# was made to the pin, which is not allowed.
log_and_raise(logger, TypeError, "Can't assign to a bus! Use the += operator.")
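    # Illustrative sketch of the mechanism described above (names are assumptions
    # for the example only):
    #
    #   data = Bus('DATA', 8)
    #   data[3:0] += Pin(), Pin(), Pin(), Pin()  # OK: routed through __iadd__
    #   data[0] = Net()                          # raises TypeError: use += instead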
def __iter__(self):
"""
Return an iterator for stepping thru individual lines of the bus.
"""
return (self[l] for l in range(len(self))) # Return generator expr.
def is_movable(self):
"""
Return true if the bus is movable to another circuit.
A bus is movable if all the nets in it are movable.
"""
for n in self.nets:
if not n.is_movable():
# One net not movable means the entire Bus is not movable.
return False
return True # All the nets were movable.
def is_implicit(self):
"""Return true if the bus name is implicit."""
from .defines import NET_PREFIX, BUS_PREFIX
prefix_re = "({}|{})+".format(re.escape(NET_PREFIX), re.escape(BUS_PREFIX))
return re.match(prefix_re, self.name)
def connect(self, *pins_nets_buses):
"""
Return the bus after connecting one or more nets, pins, or buses.
Args:
pins_nets_buses: One or more Pin, Net or Bus objects or
lists/tuples of them.
Returns:
The updated bus with the new connections.
Notes:
You can connect nets or pins to a bus like so::
p = Pin() # Create a pin.
n = Net() # Create a net.
b = Bus('B', 2) # Create a two-wire bus.
b += p,n # Connect pin and net to B[0] and B[1].
"""
nets = NetPinList(self.nets)
nets += pins_nets_buses
return self
__iadd__ = connect
@property
def name(self):
"""
Get, set and delete the name of the bus.
When setting the bus name, if another bus with the same name
is found, the name for this bus is adjusted to make it unique.
"""
return self._name
@name.setter
def name(self, name):
# Remove the existing name so it doesn't cause a collision if the
# object is renamed with its existing name.
self._name = None
# Now name the object with the given name or some variation
# of it that doesn't collide with anything else in the list.
self._name = get_unique_name(self.circuit.buses, "name", BUS_PREFIX, name)
@name.deleter
def name(self):
"""Delete the bus name."""
del self._name
def __str__(self):
"""Return a list of the nets in this bus as a string."""
return self.name + ":\n\t" + "\n\t".join([n.__str__() for n in self.nets])
__repr__ = __str__
def __len__(self):
"""Return the number of nets in this bus."""
return len(self.nets)
@property
def width(self):
"""Return width of a Bus, which is the same as using the len() operator."""
return len(self)
def __bool__(self):
"""Any valid Bus is True"""
return True
__nonzero__ = __bool__ # Python 2 compatibility.
| mit | -8,701,624,065,980,637,000 | 34.613687 | 111 | 0.556065 | false |
ereOn/pyslot | doc/source/conf.py | 1 | 9568 | # -*- coding: utf-8 -*-
#
# PySlot documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 5 22:24:39 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PySlot'
copyright = u'2016, Julien Kauffmann'
author = u'Julien Kauffmann'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = open(os.path.join(
os.path.dirname(__file__),
'..',
'..',
'VERSION',
)).read().rstrip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PySlotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PySlot.tex', u'PySlot Documentation',
u'Julien Kauffmann', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyslot', u'PySlot Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PySlot', u'PySlot Documentation',
author, 'PySlot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| gpl-3.0 | 8,700,318,934,928,059,000 | 31.107383 | 79 | 0.704849 | false |
MicheleTobias/CurvilinearAnisotropy | Code/WillametteFiles_Centerline.py | 1 | 1734 | # import the modules I'm gonna need
import glob, string, csv, os
# input the files to use
inputdirectory = 'C:\Users\Michele\Documents\Research\CurvilinearAnisotropy\WillametteRiver\willamette_elevations\willamette\centerline_elevation\\'
outputfile1 = 'C:\Users\Michele\Documents\Research\CurvilinearAnisotropy\WillametteRiver\willamette_elevations\willamette\PythonOutput\\WillamettePoints_Centerline.txt'
#outputfile2 = 'C:\Documents and Settings\Michele Tobias\My Documents\Davis\Research\GIS Data\DataOutput\\SBV_average.txt'
filemake = open(outputfile1,'w')
filemake.close()
#filemake = open(outputfile2,'w')
#filemake.close()
data = []
fulldata = []
#add *.txt to the end of the inputdirectory
inputdirectory += '*.txt'
#---------Copying the $GPGGA Lines to their own File--------------
# find the text files you need to work with
textfiles = glob.glob(inputdirectory)
#print textfiles
#for writing the column names at the top of the output file
columnnames = ['Easting\tNorthing\tBed_Elevation']
#finding the lines I need and writing them to the output file under the column names
writer = csv.writer(open(outputfile1, 'w+'))
writer.writerow(columnnames)
#print textfiles
for i in textfiles:
#shortdoc = os.path.basename(i)
#point = shortdoc.rstrip(".txt")
#point = shortdoc[shortdoc.find(' ')+1: shortdoc.find('.')]
reader = csv.reader(open(i, "r"))
data = [row for row in reader]
rownum=0
for j in data:
if rownum >1:
writer.writerow(j)
#fulldata.append(j)
rownum += 1
#j.append(point)
#if j[0] != '#':
# writer.writerow(j)
# fulldata.append(j)
# #print j
#rownum += 1
print 'Finished!'
| gpl-2.0 | 6,246,451,483,380,688,000 | 30.527273 | 168 | 0.686275 | false |
garbas/mozilla-releng-services | lib/cli_common/cli_common/log.py | 1 | 5277 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import os
import structlog
import logbook
import structlog.exceptions
CHANNELS = [
'master',
'staging',
'production',
]
class UnstructuredRenderer(structlog.processors.KeyValueRenderer):
def __call__(self, logger, method_name, event_dict):
event = None
if 'event' in event_dict:
event = event_dict.pop('event')
if event_dict or event is None:
# if there are other keys, use the parent class to render them
# and append to the event
rendered = super(UnstructuredRenderer, self).__call__(
logger, method_name, event_dict)
return '%s (%s)' % (event, rendered)
else:
return event
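# For illustration (hypothetical values): rendering an event dict such as
# {'event': 'user login', 'user_id': 42} with this renderer yields the
# unstructured string "user login (user_id=42)".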
def setup_mozdef(project_name, channel, MOZDEF):
'''
Setup mozdef using taskcluster secrets
'''
import mozdef_client
    severity_map = {
'critical': mozdef_client.MozDefEvent.SEVERITY_CRITICAL,
'error': mozdef_client.MozDefEvent.SEVERITY_ERROR,
'warning': mozdef_client.MozDefEvent.SEVERITY_WARNING,
'info': mozdef_client.MozDefEvent.SEVERITY_INFO,
'debug': mozdef_client.MozDefEvent.SEVERITY_DEBUG,
}
def send(logger, method_name, event_dict):
# only send to mozdef if `mozdef` is set
if event_dict.pop('mozdef', False):
msg = mozdef_client.MozDefEvent(MOZDEF)
msg.summary = event_dict.get('event', '')
msg.tags = [
'mozilla-releng/services/' + channel,
project_name,
]
if set(event_dict) - {'event'}:
msg.details = event_dict.copy()
msg.details.pop('event', None)
msg.source = logger.name
msg.set_severity(
                severity_map.get(
method_name,
mozdef_client.MozDefEvent.SEVERITY_INFO,
),
)
msg.send()
return event_dict
return send
def setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT):
'''
Setup papertrail account using taskcluster secrets
'''
# Setup papertrail
papertrail = logbook.SyslogHandler(
application_name='mozilla-releng/services/{}/{}'.format(channel, project_name),
address=(PAPERTRAIL_HOST, int(PAPERTRAIL_PORT)),
format_string='{record.time} {record.channel}: {record.message}',
bubble=True,
)
papertrail.push_application()
def setup_sentry(project_name, channel, SENTRY_DSN):
'''
Setup sentry account using taskcluster secrets
'''
from raven import Client
from raven.handlers.logbook import SentryHandler
sentry_client = Client(
dsn=SENTRY_DSN,
site=project_name,
name='mozilla-releng/services',
environment=channel,
# TODO:
# release=read(VERSION) we need to promote that as well via secrets
# tags=...
# repos=...
)
sentry = SentryHandler(sentry_client, level=logbook.WARNING, bubble=True)
sentry.push_application()
def init_logger(project_name,
channel=None,
level=logbook.INFO,
handler=None,
PAPERTRAIL_HOST=None,
PAPERTRAIL_PORT=None,
SENTRY_DSN=None,
MOZDEF=None
):
if not channel:
channel = os.environ.get('APP_CHANNEL')
if channel and channel not in CHANNELS:
        raise Exception('Initializing logging with channel `{}`. It should be one of: {}'.format(channel, ', '.join(CHANNELS)))
    # By default, output logs on stderr
if handler is None:
fmt = '{record.channel}: {record.message}'
handler = logbook.StderrHandler(level=level, format_string=fmt)
handler.push_application()
# Log to papertrail
if channel and PAPERTRAIL_HOST and PAPERTRAIL_PORT:
setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT)
# Log to sentry
if channel and SENTRY_DSN:
setup_sentry(project_name, channel, SENTRY_DSN)
def logbook_factory(*args, **kwargs):
# Logger given to structlog
logbook.compat.redirect_logging()
return logbook.Logger(level=level, *args, **kwargs)
# Setup structlog over logbook
processors = [
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
]
# send to mozdef before formatting into a string
if channel and MOZDEF:
processors.append(setup_mozdef(project_name, channel, MOZDEF))
processors.append(UnstructuredRenderer())
structlog.configure(
context_class=structlog.threadlocal.wrap_dict(dict),
processors=processors,
logger_factory=logbook_factory,
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
def get_logger(*args, **kwargs):
return structlog.get_logger(*args, **kwargs)
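# Minimal usage sketch (the project name, channel and event values below are
# assumptions for illustration only):
#
#   init_logger('shipit', channel='staging')
#   logger = get_logger('shipit.api')
#   logger.info('service started', port=8000)
#   logger.warning('unexpected response', mozdef=True)  # also sent to MozDef when
#                                                       # a MOZDEF target is configured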
| mpl-2.0 | -6,548,193,065,109,681,000 | 28.480447 | 126 | 0.617396 | false |
zfrenchee/pandas | pandas/core/api.py | 1 | 3146 |
# pylint: disable=W0614,W0401,W0611
# flake8: noqa
import numpy as np
from pandas.core.algorithms import factorize, unique, value_counts
from pandas.core.dtypes.missing import isna, isnull, notna, notnull
from pandas.core.categorical import Categorical
from pandas.core.groupby import Grouper
from pandas.io.formats.format import set_eng_float_format
from pandas.core.index import (Index, CategoricalIndex, Int64Index,
UInt64Index, RangeIndex, Float64Index,
MultiIndex, IntervalIndex,
TimedeltaIndex, DatetimeIndex,
PeriodIndex, NaT)
from pandas.core.indexes.period import Period, period_range, pnow
from pandas.core.indexes.timedeltas import Timedelta, timedelta_range
from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range
from pandas.core.indexes.interval import Interval, interval_range
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel, WidePanel
from pandas.core.panel4d import Panel4D
# TODO: Remove import when statsmodels updates #18264
from pandas.core.reshape.reshape import get_dummies
from pandas.core.indexing import IndexSlice
from pandas.core.tools.numeric import to_numeric
from pandas.tseries.offsets import DateOffset
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
# see gh-14094.
from pandas.util._depr_module import _DeprecatedModule
_removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay',
'customBusinessMonthEnd', 'customBusinessMonthBegin',
'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin',
'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd',
'byearEnd', 'week']
datetools = _DeprecatedModule(deprmod='pandas.core.datetools',
removals=_removals)
from pandas.core.config import (get_option, set_option, reset_option,
describe_option, option_context, options)
# deprecation, xref #13790
def match(*args, **kwargs):
import warnings
warnings.warn("pd.match() is deprecated and will be removed "
"in a future version",
FutureWarning, stacklevel=2)
from pandas.core.algorithms import match
return match(*args, **kwargs)
def groupby(*args, **kwargs):
import warnings
warnings.warn("pd.groupby() is deprecated and will be removed; "
"Please use the Series.groupby() or "
"DataFrame.groupby() methods",
FutureWarning, stacklevel=2)
return args[0].groupby(*args[1:], **kwargs)
# Deprecation: xref gh-16747
class TimeGrouper(object):
def __new__(cls, *args, **kwargs):
from pandas.core.resample import TimeGrouper
import warnings
warnings.warn("pd.TimeGrouper is deprecated and will be removed; "
"Please use pd.Grouper(freq=...)",
FutureWarning, stacklevel=2)
return TimeGrouper(*args, **kwargs)
| bsd-3-clause | 2,358,515,945,609,053,000 | 37.839506 | 76 | 0.679275 | false |
siddhantgoel/tornado-sqlalchemy | tests/test_session_mixin.py | 1 | 1642 | from unittest.mock import Mock
from tornado_sqlalchemy import MissingDatabaseSettingError, SessionMixin
from ._common import BaseTestCase, User, db
class SessionMixinTestCase(BaseTestCase):
def test_mixin_ok(self):
class GoodHandler(SessionMixin):
def __init__(h_self):
h_self.application = Mock()
h_self.application.settings = {'db': db}
def run(h_self):
with h_self.make_session() as session:
return session.query(User).count()
self.assertEqual(GoodHandler().run(), 0)
def test_mixin_no_db(self):
class BadHandler(SessionMixin):
def __init__(h_self):
h_self.application = Mock()
h_self.application.settings = {}
def run(h_self):
with h_self.make_session() as session:
return session.query(User).count()
self.assertRaises(MissingDatabaseSettingError, BadHandler().run)
def test_distinct_sessions(self):
sessions = set()
class Handler(SessionMixin):
def __init__(h_self):
db.configure(url=self.db_url)
h_self.application = Mock()
h_self.application.settings = {'db': db}
def run(h_self):
session = h_self.session
sessions.add(id(session))
value = session.query(User).count()
session.commit()
session.close()
return value
Handler().run()
Handler().run()
self.assertEqual(len(sessions), 2)
| mit | -7,001,430,187,323,076,000 | 27.807018 | 72 | 0.545676 | false |
detrout/pykolab | pykolab/cli/telemetry/cmd_examine_session.py | 1 | 4119 |
# -*- coding: utf-8 -*-
# Copyright 2010-2012 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 or, at your option, any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import sys
import pykolab
from pykolab.translate import _
log = pykolab.getLogger('pykolab.cli')
conf = pykolab.getConf()
from pykolab import telemetry
from pykolab.cli import commands
def __init__():
commands.register('examine_session', execute, group='telemetry', description="Examine a Telemetry session.")
def execute(*args, **kw):
    db = telemetry.init_db()
    # The session identifier may be passed in directly (used by the recursive
    # per-session calls below) or taken from the command-line arguments.
    session_id = kw.get('session_id', None)
    wanted = False
    if session_id is None:
        try:
            wanted = conf.cli_args.pop(0)
        except:
            log.error(_("Unspecified session identifier"))
            sys.exit(1)
if not wanted:
wanted = session_id
session_wanted = None
try:
_wanted = (int)(wanted)
session_wanted = _wanted
except:
user_wanted = wanted
    if session_wanted is not None:
session = db.query(
telemetry.TelemetrySession
).filter_by(
id=session_wanted
).first()
if session == None:
log.error(_("Invalid session identifier"))
sys.exit(1)
user = db.query(
telemetry.TelemetryUser
).filter_by(
id=session.user_id
).first()
server = db.query(
telemetry.TelemetryServer
).filter_by(
id=session.server_id
).first()
else:
user = db.query(
telemetry.TelemetryUser
).filter_by(
sasl_username=user_wanted
).first()
sessions = db.query(
telemetry.TelemetrySession
).filter_by(
user_id=user.id
).order_by(
telemetry.telemetry_session_table.c.start
)
for session in sessions:
            # Examine each of this user's sessions in turn.
            execute(session_id=session.id)
return
print _("Session by %s on server %s") % (user.sasl_username,server.fqdn)
command_issues = db.query(
telemetry.TelemetryCommandIssue
).filter_by(
session_id=session.id
)
for command_issue in command_issues:
command = db.query(
telemetry.TelemetryCommand
).filter_by(
id=command_issue.command_id
).first()
command_arg = db.query(
telemetry.TelemetryCommandArg
).filter_by(
id=command_issue.command_arg_id
).first()
print "Client(%d): %s %s %s" % (
command_issue.id,
command_issue.command_tag,
command.command,
command_arg.command_arg
)
server_responses = db.query(
telemetry.TelemetryServerResponse
).filter_by(
command_issue_id=command_issue.id
)
for server_response in server_responses:
server_response_lines = server_response.response.split('\n');
for server_response_line in server_response_lines:
print "Server(%d): %s" % (
server_response.id,
server_response_line
)
| gpl-3.0 | 592,902,317,208,415,900 | 28.212766 | 112 | 0.554989 | false |
gltn/stdm | stdm/ui/view_str.py | 1 | 44716 | """
/***************************************************************************
Name : View STR Relationships
Description : Main Window for searching and browsing the social tenure
relationship of the participating entities.
Date : 24/May/2014
copyright : (C) 2014 by UN-Habitat and implementing partners.
See the accompanying file CONTRIBUTORS.txt in the root
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import logging
from collections import OrderedDict
from datetime import date
from qgis.PyQt import uic
from qgis.PyQt.QtCore import (
QTimer,
Qt,
QSize,
QObject,
pyqtSignal,
QThread,
QRegExp,
QSortFilterProxyModel,
pyqtSlot
)
from qgis.PyQt.QtWidgets import (
QMainWindow,
QDesktopWidget,
QToolBar,
QAction,
QApplication,
QProgressDialog,
QProgressBar,
QMessageBox,
QVBoxLayout,
QWidget,
QScrollArea,
QFrame,
QCheckBox,
QTabBar,
QCompleter
)
from qgis.core import QgsProject
from qgis.utils import (
iface
)
from sqlalchemy import exc
from sqlalchemy import (
func,
String
)
from stdm.data import globals
from stdm.data.configuration import entity_model
from stdm.data.database import Content
from stdm.data.pg_utils import pg_table_count
from stdm.data.qtmodels import (
BaseSTDMTableModel
)
from stdm.exceptions import DummyException
from stdm.security.authorization import Authorizer
from stdm.settings import current_profile
from stdm.ui.feature_details import DetailsTreeView
from stdm.ui.forms.widgets import ColumnWidgetRegistry
from stdm.ui.gui_utils import GuiUtils
from stdm.ui.notification import (
NotificationBar
)
from stdm.ui.social_tenure.str_editor import STREditor
from stdm.ui.sourcedocument import (
SourceDocumentManager,
DocumentWidget
)
from stdm.ui.spatial_unit_manager import SpatialUnitManagerDockWidget
from stdm.utils.util import (
entity_searchable_columns,
entity_display_columns,
format_name,
lookup_parent_entity
)
LOGGER = logging.getLogger('stdm')
WIDGET, BASE = uic.loadUiType(
GuiUtils.get_ui_file_path('ui_view_str.ui'))
class ViewSTRWidget(WIDGET, BASE):
"""
Search and browse the social tenure relationship
of all participating entities.
"""
def __init__(self, plugin):
QMainWindow.__init__(self, plugin.iface.mainWindow())
self.setupUi(self)
self.btnSearch.setIcon(GuiUtils.get_icon('search.png'))
self.btnClearSearch.setIcon(GuiUtils.get_icon('reset.png'))
self._plugin = plugin
self.search_done = False
# self.tbPropertyPreview.set_iface(self._plugin.iface)
QTimer.singleShot(
100, lambda: self.tbPropertyPreview.set_iface(self._plugin.iface))
self.curr_profile = current_profile()
self.spatial_units = self.curr_profile.social_tenure.spatial_units
# Center me
self.move(QDesktopWidget().availableGeometry().center() -
self.frameGeometry().center())
self.sp_unit_manager = SpatialUnitManagerDockWidget(
self._plugin.iface, self._plugin
)
self.geom_cols = []
for spatial_unit in self.spatial_units:
each_geom_col = self.sp_unit_manager.geom_columns(spatial_unit)
self.geom_cols.extend(each_geom_col)
# Configure notification bar
self._notif_search_config = NotificationBar(
self.vl_notification
)
# set whether currently logged in user has
# permissions to edit existing STR records
self._can_edit = self._plugin.STRCntGroup.canUpdate()
self._can_delete = self._plugin.STRCntGroup.canDelete()
self._can_create = self._plugin.STRCntGroup.canCreate()
# Variable used to store a reference to the
# currently selected social tenure relationship
# when displaying documents in the supporting documents tab window.
# This ensures that there are no duplicates
# when the same item is selected over and over again.
self._strID = None
self.removed_docs = None
# Used to store the root hash of the currently selected node.
self._curr_rootnode_hash = ""
self.str_model, self.str_doc_model = entity_model(
self.curr_profile.social_tenure, False, True
)
self._source_doc_manager = SourceDocumentManager(
self.curr_profile.social_tenure.supporting_doc,
self.str_doc_model,
self
)
self._source_doc_manager.documentRemoved.connect(
self.onSourceDocumentRemoved
)
self._source_doc_manager.setEditPermissions(False)
self.addSTR = None
self.editSTR = None
self.deleteSTR = None
self.initGui()
self.add_spatial_unit_layer()
self.details_tree_view = DetailsTreeView(parent=self, plugin=self._plugin)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.details_tree_view)
self.str_tree_container.setLayout(layout)
# else:
# self.details_tree_view = self._plugin.details_tree_view
self.details_tree_view.activate_feature_details(True)
self.details_tree_view.model.clear()
count = pg_table_count(self.curr_profile.social_tenure.name)
self.setWindowTitle(
self.tr('{}{}'.format(
self.windowTitle(), '- ' + str(count) + ' rows'
))
)
self.active_spu_id = -1
self.toolBox.setStyleSheet(
'''
QToolBox::tab {
background: qlineargradient(
x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #EDEDED, stop: 0.4 #EDEDED,
stop: 0.5 #EDEDED, stop: 1.0 #D3D3D3
);
border-radius: 2px;
border-style: outset;
border-width: 2px;
height: 100px;
border-color: #C3C3C3;
}
QToolBox::tab:selected {
font: italic;
}
'''
)
self.details_tree_view.view.setStyleSheet(
'''
QTreeView:!active {
selection-background-color: #72a6d9;
}
'''
)
def add_tool_buttons(self):
"""
Add toolbar buttons of add, edit and delete buttons.
:return: None
:rtype: NoneType
"""
tool_buttons = QToolBar()
tool_buttons.setObjectName('form_toolbar')
tool_buttons.setIconSize(QSize(16, 16))
self.addSTR = QAction(GuiUtils.get_icon(
'add.png'),
QApplication.translate('ViewSTRWidget', 'Add'),
self
)
self.editSTR = QAction(
GuiUtils.get_icon('edit.png'),
QApplication.translate('ViewSTRWidget', 'Edit'),
self
)
self.deleteSTR = QAction(
GuiUtils.get_icon('remove.png'),
QApplication.translate('ViewSTRWidget', 'Remove'),
self
)
tool_buttons.addAction(self.addSTR)
tool_buttons.addAction(self.editSTR)
tool_buttons.addAction(self.deleteSTR)
self.toolbarVBox.addWidget(tool_buttons)
def initGui(self):
"""
Initialize widget
"""
self.tb_actions.setVisible(False)
self._load_entity_configurations()
self.add_tool_buttons()
# Connect signals
self.tbSTREntity.currentChanged.connect(self.entityTabIndexChanged)
self.btnSearch.clicked.connect(self.searchEntityRelations)
self.btnClearSearch.clicked.connect(self.clearSearch)
# self.tvSTRResults.expanded.connect(self.onTreeViewItemExpanded)
# Set the results treeview to accept requests for context menus
# self.tvSTRResults.setContextMenuPolicy(Qt.CustomContextMenu)
# self.tvSTRResults.customContextMenuRequested.connect(
# self.onResultsContextMenuRequested
# )
if not self._can_create:
self.addSTR.hide()
if not self._can_edit:
self.editSTR.hide()
else:
self.editSTR.setDisabled(True)
if not self._can_delete:
self.deleteSTR.hide()
else:
self.deleteSTR.setDisabled(True)
self.addSTR.triggered.connect(self.load_new_str_editor)
self.deleteSTR.triggered.connect(self.delete_str)
self.editSTR.triggered.connect(self.load_edit_str_editor)
# Load async for the current widget
self.entityTabIndexChanged(0)
def init_progress_dialog(self):
"""
Initializes the progress dialog.
"""
self.progress = QProgressBar(self)
self.progress.resize(self.width(), 10)
self.progress.setTextVisible(False)
def add_spatial_unit_layer(self):
"""
Add the spatial unit layer into the map canvas for later use.
"""
# Used for startup of view STR, just add the first geom layer.
if len(self.geom_cols) > 0:
for spatial_unit in self.spatial_units:
layer_name_item = self.sp_unit_manager.geom_col_layer_name(
spatial_unit.name,
self.geom_cols[0]
)
self.sp_unit_manager.add_layer_by_name(layer_name_item)
def _check_permissions(self):
"""
Enable/disable actions based on the
permissions defined in the content
group.
"""
if self._can_edit:
self.tb_actions.addAction(self._new_str_action)
else:
self.tb_actions.removeAction(self._new_str_action)
if len(self.tb_actions.actions()) == 0:
self.tb_actions.setVisible(False)
else:
self.tb_actions.setVisible(True)
def _load_entity_configurations(self):
"""
Specify the entity configurations.
"""
try:
self.parties = self.curr_profile.social_tenure.parties
tb_str_entities = self.parties + self.spatial_units
for i, t in enumerate(tb_str_entities):
QApplication.processEvents()
entity_cfg = self._entity_config_from_profile(
str(t.name), t.short_name
)
if entity_cfg is not None:
entity_widget = self.add_entity_config(entity_cfg)
# entity_widget.setNodeFormatter(
# EntityNodeFormatter(
# entity_cfg, self.tvSTRResults, self
# )
# )
except DummyException as pe:
self._notif_search_config.clear()
self._notif_search_config.insertErrorNotification(str(pe))
def _entity_config_from_profile(self, table_name, short_name):
"""
Creates an EntityConfig object from the table name.
:param table_name: Name of the database table.
:type table_name: str
:return: Entity configuration object.
:rtype: EntityConfig
"""
table_display_name = format_name(short_name)
entity = self.curr_profile.entity_by_name(table_name)
model = entity_model(entity)
if model is not None:
# Entity configuration
entity_cfg = EntityConfiguration()
entity_cfg.Title = table_display_name
entity_cfg.STRModel = model
entity_cfg.data_source_name = table_name
for col, factory in self._get_widget_factory(entity):
entity_cfg.LookupFormatters[col.name] = factory
# Load filter and display columns
# using only those which are of
# numeric/varchar type
searchable_columns = entity_searchable_columns(entity)
display_columns = entity_display_columns(entity)
for c in searchable_columns:
if c != 'id':
entity_cfg.filterColumns[c] = format_name(c)
for c in display_columns:
if c != 'id':
entity_cfg.displayColumns[c] = format_name(c)
return entity_cfg
else:
return None
def _get_widget_factory(self, entity):
"""
Get widget factory for specific column type
:param entity: Current column entity object
:type entity: Entity
:return c: Column object corresponding to the widget factory
:rtype c: BaseColumn
:return col_factory: Widget factory corresponding to the column type
:rtype col_factory: ColumnWidgetRegistry
"""
for c in entity.columns.values():
col_factory = ColumnWidgetRegistry.factory(c.TYPE_INFO)
if col_factory is not None:
yield c, col_factory(c)
def add_entity_config(self, config):
"""
Set an entity configuration option and
add it to the 'Search Entity' tab.
"""
entityWidg = STRViewEntityWidget(config)
entityWidg.asyncStarted.connect(self._progressStart)
entityWidg.asyncFinished.connect(self._progressFinish)
tabIndex = self.tbSTREntity.addTab(entityWidg, config.Title)
return entityWidg
def entityTabIndexChanged(self, index):
"""
Raised when the tab index of the entity search tab widget changes.
"""
# Get the current widget in the tab container
entityWidget = self.tbSTREntity.currentWidget()
if isinstance(entityWidget, EntitySearchItem):
entityWidget.loadAsync()
def searchEntityRelations(self):
"""
Slot that searches for matching items for
the specified entity and corresponding STR entities.
"""
entityWidget = self.tbSTREntity.currentWidget()
entity_name = entityWidget.config.data_source_name
self._reset_controls()
if isinstance(entityWidget, EntitySearchItem):
valid, msg = entityWidget.validate()
if not valid:
self._notif_search_config.clear()
self._notif_search_config.insertErrorNotification(msg)
return
results, searchWord = entityWidget.executeSearch()
# Show error message
if len(results) == 0:
noResultsMsg = QApplication.translate(
'ViewSTR',
'No results found for "{}"'.format(searchWord)
)
self._notif_search_config.clear()
self._notif_search_config.insertErrorNotification(
noResultsMsg
)
return
party_names = [e.name for e in self.curr_profile.social_tenure.parties]
entity = self.curr_profile.entity_by_name(entity_name)
result_ids = [r.id for r in results]
if entity_name in party_names:
self.active_spu_id = self.details_tree_view.search_party(
entity, result_ids
)
else:
self.details_tree_view.search_spatial_unit(
entity, result_ids
)
# self.tbPropertyPreview._iface.activeLayer().selectByExpression("id={}".format(self.active_spu_id))
# self.details_tree_view._selected_features = self.tbPropertyPreview._iface.activeLayer().selectedFeatures()
# self._load_root_node(entity_name, formattedNode)
def clearSearch(self):
"""
Clear search input parameters (for current widget) and results.
"""
entityWidget = self.tbSTREntity.currentWidget()
if isinstance(entityWidget, EntitySearchItem):
entityWidget.reset()
self._reset_controls()
def _reset_controls(self):
# Clear tree view
self._resetTreeView()
# Clear document listings
self._deleteSourceDocTabs()
# Remove spatial unit memory layer
self.tbPropertyPreview.remove_layer()
def on_select_results(self):
"""
Slot which is raised when the selection
is changed in the tree view
selection model.
"""
if len(self.details_tree_view.view.selectedIndexes()) < 1:
self.disable_buttons()
return
self.search_done = True
index = self.details_tree_view.view.selectedIndexes()[0]
item = self.details_tree_view.model.itemFromIndex(index)
QApplication.processEvents()
# STR node - edit social tenure relationship
if item.text() == self.details_tree_view.str_text:
entity = self.curr_profile.social_tenure
str_model = self.details_tree_view.str_models[item.data()]
documents = self.details_tree_view._supporting_doc_models(
entity.name, str_model
)
self._load_source_documents(documents)
# if there is supporting document,
# expand supporting document tab
if len(documents) > 0:
self.toolBox.setCurrentIndex(1)
self.disable_buttons(False)
        # spatial unit node - draw the selected spatial unit
elif item.data() in self.details_tree_view.spatial_unit_items.keys():
self.toolBox.setCurrentIndex(0)
entity = self.details_tree_view.spatial_unit_items[item.data()]
model = self.details_tree_view.feature_model(entity, item.data())
self.draw_spatial_unit(entity.name, model)
self.disable_buttons()
canvas = iface.mapCanvas()
if canvas:
canvas.zoomToFullExtent()
else:
self.disable_buttons()
def disable_buttons(self, status=True):
        if self._can_edit:
            self.editSTR.setDisabled(status)
        if self._can_delete:
            self.deleteSTR.setDisabled(status)
def str_party_column_obj(self, record):
"""
Gets the current party column name in STR
table by finding party column with value
other than None.
        :param record: The STR record or result.
        :type record: object
        :return: The STR model column attribute of the party that has a value.
        :rtype: SQLAlchemy column attribute
"""
for party in self.parties:
party_name = party.short_name.lower()
party_id = '{}_id'.format(party_name)
if party_id not in record.__dict__:
return None
if record.__dict__[party_id] is not None:
party_id_obj = getattr(self.str_model, party_id)
return party_id_obj
def load_edit_str_editor(self):
self.details_tree_view.edit_selected_node()
self.btnSearch.click()
self.disable_buttons()
def load_new_str_editor(self):
try:
# Check type of node and perform corresponding action
add_str = STREditor()
add_str.exec_()
except DummyException as ex:
QMessageBox.critical(
self._plugin.iface.mainWindow(),
QApplication.translate(
"STDMPlugin",
"Loading Error"
),
str(ex)
)
def delete_str(self):
self.details_tree_view.delete_selected_item()
self.btnSearch.click()
self.disable_buttons()
def onSourceDocumentRemoved(self, container_id, doc_uuid, removed_doc):
"""
Slot raised when a source document is removed from the container.
If there are no documents in the specified container then remove
the tab.
"""
curr_container = self.tbSupportingDocs.currentWidget()
curr_doc_widget = curr_container.findChildren(DocumentWidget)
for doc in curr_doc_widget:
if doc.fileUUID == doc_uuid:
doc.deleteLater()
self.removed_docs = removed_doc
def draw_spatial_unit(self, entity_name, model):
"""
Render the geometry of the given spatial unit in the spatial view.
        :param entity_name: Name of the spatial unit entity.
        :type entity_name: str
        :param model: SQLAlchemy model object representing the feature.
"""
entity = self.curr_profile.entity_by_name(entity_name)
self.tbPropertyPreview.draw_spatial_unit(entity, model)
def showEvent(self, event):
"""
(Re)load map layers in the viewer and main canvas.
:param event: Window event
:type event: QShowEvent
"""
self.setEnabled(True)
if QTimer is not None:
QTimer.singleShot(200, self.init_mirror_map)
return QMainWindow.showEvent(self, event)
def init_mirror_map(self):
self._notify_no_base_layers()
# Add spatial unit layer if it doesn't exist
self.tbPropertyPreview.refresh_canvas_layers()
self.tbPropertyPreview.load_web_map()
def _notify_no_base_layers(self):
"""
Checks if there are any base layers that will be used when
visualizing the spatial units. If there are no base layers
then insert warning message.
"""
self._notif_search_config.clear()
num_layers = len(QgsProject.instance().mapLayers())
if num_layers == 0:
msg = QApplication.translate(
"ViewSTR",
"No basemap layers are loaded in the "
"current project. Basemap layers "
"enhance the visualization of spatial units."
)
self._notif_search_config.insertWarningNotification(msg)
def _deleteSourceDocTabs(self):
"""
Removes all source document tabs and deletes their references.
"""
tabCount = self.tbSupportingDocs.count()
while tabCount != 0:
srcDocWidget = self.tbSupportingDocs.widget(tabCount - 1)
self.tbSupportingDocs.removeTab(tabCount - 1)
del srcDocWidget
tabCount -= 1
self._strID = None
self._source_doc_manager.reset()
def _resetTreeView(self):
"""
Clears the results tree view.
"""
# Reset tree view
strModel = self.details_tree_view.view.model()
resultsSelModel = self.details_tree_view.view.selectionModel()
if strModel:
strModel.clear()
if resultsSelModel:
if self.search_done:
resultsSelModel.selectionChanged.disconnect(self.on_select_results)
resultsSelModel.selectionChanged.connect(self.on_select_results)
def _load_source_documents(self, source_docs):
"""
Load source documents into document listing widget.
"""
# Configure progress dialog
progress_msg = QApplication.translate(
"ViewSTR", "Loading supporting documents..."
)
progress_dialog = QProgressDialog(self)
if len(source_docs) > 0:
progress_dialog.setWindowTitle(progress_msg)
progress_dialog.setRange(0, len(source_docs))
progress_dialog.setWindowModality(Qt.WindowModal)
progress_dialog.setFixedWidth(380)
progress_dialog.show()
progress_dialog.setValue(0)
self._notif_search_config.clear()
self.tbSupportingDocs.clear()
self._source_doc_manager.reset()
if len(source_docs) < 1:
empty_msg = QApplication.translate(
'ViewSTR', 'No supporting document is uploaded '
'for this social tenure relationship.'
)
self._notif_search_config.clear()
self._notif_search_config.insertWarningNotification(empty_msg)
for i, (doc_type_id, doc_obj) in enumerate(source_docs.items()):
# add tabs, and container and widget for each tab
tab_title = self._source_doc_manager.doc_type_mapping[doc_type_id]
tab_widget = QWidget()
tab_widget.setObjectName(tab_title)
cont_layout = QVBoxLayout(tab_widget)
cont_layout.setObjectName('widget_layout_' + tab_title)
scrollArea = QScrollArea(tab_widget)
scrollArea.setFrameShape(QFrame.NoFrame)
scrollArea_contents = QWidget()
scrollArea_contents.setObjectName('tab_scroll_area_' + tab_title)
tab_layout = QVBoxLayout(scrollArea_contents)
tab_layout.setObjectName('layout_' + tab_title)
scrollArea.setWidgetResizable(True)
scrollArea.setWidget(scrollArea_contents)
cont_layout.addWidget(scrollArea)
self._source_doc_manager.registerContainer(
tab_layout, doc_type_id
)
for doc in doc_obj:
try:
# add doc widgets
self._source_doc_manager.insertDocFromModel(
doc, doc_type_id
)
except DummyException as ex:
LOGGER.debug(str(ex))
self.tbSupportingDocs.addTab(
tab_widget, tab_title
)
progress_dialog.setValue(i + 1)
progress_dialog.deleteLater()
del progress_dialog
# def _on_node_reference_changed(self, rootHash):
# """
# Method for resetting document listing and map preview
# if another root node and its children
# are selected then the documents are reset as
# well as the map preview control.
# """
# if rootHash != self._curr_rootnode_hash:
# self._deleteSourceDocTabs()
# self._curr_rootnode_hash = rootHash
def _progressStart(self):
"""
Load progress dialog window.
        For items whose duration is unknown,
'isindefinite' = True by default.
If 'isindefinite' is False, then
'rangeitems' has to be specified.
"""
pass
def _progressFinish(self):
"""
Hide progress dialog window.
"""
pass
def _edit_permissions(self):
"""
Returns True/False whether the current logged in user
has permissions to create new social tenure relationships.
If true, then the system assumes that
they can also edit STR records.
"""
canEdit = False
userName = globals.APP_DBCONN.User.UserName
authorizer = Authorizer(userName)
newSTRCode = "9576A88D-C434-40A6-A318-F830216CA15A"
# Get the name of the content from the code
cnt = Content()
createSTRCnt = cnt.queryObject().filter(
Content.code == newSTRCode
).first()
if createSTRCnt:
name = createSTRCnt.name
canEdit = authorizer.CheckAccess(name)
return canEdit
class EntitySearchItem(QObject):
"""
Abstract class for implementation by widgets that
enable users to search for entity records.
"""
def __init__(self, formatter=None):
super().__init__()
# Specify the formatter that should be
# applied on the result item. It should
# inherit from 'stdm.navigation.STRNodeFormatter'
self.formatter = formatter
def setNodeFormatter(self, formatter):
"""
Set the formatter that should be
applied on the entity search results.
"""
self.formatter = formatter
def validate(self):
"""
Method for validating the input arguments
before a search is conducted.
Should return bool indicating whether validation
was successful and message (applicable if validation fails).
"""
raise NotImplementedError()
def executeSearch(self):
"""
        Implemented when a search operation is executed.
        Should return a tuple of the matching result objects
        and the search word.
"""
raise NotImplementedError(
str(
QApplication.translate(
"ViewSTR",
"Subclass must implement abstract method."
)
)
)
def loadAsync(self):
"""
Any initialization that needs to be carried
out when the parent container is activated.
"""
pass
def errorHandler(self, error):
"""
Generic handler that logs error
messages to the QGIS message log
"""
# QgsMessageLog.logMessage(error,2)
LOGGER.debug(error)
def reset(self):
"""
Clear search results.
"""
pass
WIDGET2, BASE2 = uic.loadUiType(
GuiUtils.get_ui_file_path('ui_str_view_entity.ui'))
class STRViewEntityWidget(WIDGET2, BASE2, EntitySearchItem):
"""
A widget that represents options for searching through an entity.
"""
asyncStarted = pyqtSignal()
asyncFinished = pyqtSignal()
def __init__(self, config, formatter=None, parent=None):
QWidget.__init__(self, parent)
EntitySearchItem.__init__(self, formatter)
self.setupUi(self)
self.tbSTRViewEntity.setTabIcon(0, GuiUtils.get_icon('filter.png'))
self.tbSTRViewEntity.setTabIcon(1, GuiUtils.get_icon('period_blue.png'))
self.config = config
self.setConfigOptions()
self.curr_profile = current_profile()
self.social_tenure = self.curr_profile.social_tenure
self.str_model = entity_model(self.social_tenure)
# Model for storing display and actual mapping values
self._completer_model = None
self._proxy_completer_model = None
# Hook up signals
self.cboFilterCol.currentIndexChanged.connect(
self._on_column_index_changed
)
self.init_validity_dates()
self.validity_from_date.dateChanged.connect(
self.set_minimum_to_date
)
self.validity.setDisabled(True)
self.init_validity_checkbox()
def init_validity_checkbox(self):
self.check_box_list = []
self.validity_checkbox = QCheckBox()
self.check_box_list.append(self.validity_checkbox)
self.tbSTRViewEntity.tabBar().setTabButton(
self.tbSTRViewEntity.tabBar().count() - 1,
QTabBar.LeftSide, self.validity_checkbox
)
self.validity_checkbox.stateChanged.connect(self.toggle_validity_period)
def toggle_validity_period(self, state):
if state == Qt.Checked:
self.validity.setDisabled(False)
else:
self.validity.setDisabled(True)
def set_minimum_to_date(self):
"""
        Set the minimum 'to' date based on the
        change in value of the 'from' date.
:return:
:rtype:
"""
self.validity_to_date.setMinimumDate(
self.validity_from_date.date()
)
def init_validity_dates(self):
"""
Initialize the dates by setting the current date.
:return:
:rtype:
"""
self.validity_from_date.setDate(
date.today()
)
self.validity_to_date.setDate(
date.today()
)
def setConfigOptions(self):
"""
Apply configuration options.
"""
# Set filter columns and remove id column
for col_name, display_name in self.config.filterColumns.items():
if col_name != "id":
self.cboFilterCol.addItem(
display_name, col_name
)
def loadAsync(self):
"""
Asynchronously loads an entity's attribute values.
"""
self.asyncStarted.emit()
# Create model worker
workerThread = QThread(self)
modelWorker = ModelWorker()
modelWorker.moveToThread(workerThread)
# Connect signals
modelWorker.error.connect(self.errorHandler)
workerThread.started.connect(
lambda: modelWorker.fetch(
self.config.STRModel, self.currentFieldName()
)
)
modelWorker.retrieved.connect(self._asyncFinished)
modelWorker.retrieved.connect(workerThread.quit)
workerThread.finished.connect(modelWorker.deleteLater)
workerThread.finished.connect(workerThread.deleteLater)
# Start thread
workerThread.start()
def validate(self):
"""
Validate entity search widget
"""
is_valid = True
message = ""
if self.txtFilterPattern.text() == "":
message = QApplication.translate(
"ViewSTR", "Search word cannot be empty."
)
is_valid = False
return is_valid, message
def executeSearch(self):
"""
Base class override.
Search for matching items for the specified entity and column.
"""
model_root_node = None
prog_dialog = QProgressDialog(self)
prog_dialog.setFixedWidth(380)
prog_dialog.setWindowTitle(
QApplication.translate(
"STRViewEntityWidget",
"Searching for STR..."
)
)
prog_dialog.show()
prog_dialog.setRange(
0, 10
)
search_term = self._searchTerm()
prog_dialog.setValue(2)
# Try to get the corresponding search term value from the completer model
if self._completer_model is not None:
reg_exp = QRegExp("^%s$" % search_term, Qt.CaseInsensitive,
QRegExp.RegExp2)
self._proxy_completer_model.setFilterRegExp(reg_exp)
if self._proxy_completer_model.rowCount() > 0:
# Get corresponding actual value from the first matching item
value_model_idx = self._proxy_completer_model.index(0, 1)
source_model_idx = self._proxy_completer_model.mapToSource(
value_model_idx
)
prog_dialog.setValue(4)
search_term = self._completer_model.data(
source_model_idx, Qt.DisplayRole
)
modelInstance = self.config.STRModel()
modelQueryObj = modelInstance.queryObject()
queryObjProperty = getattr(
self.config.STRModel, self.currentFieldName()
)
entity_name = modelQueryObj._primary_entity._label_name
entity = self.curr_profile.entity_by_name(entity_name)
prog_dialog.setValue(6)
# Get property type so that the filter can
# be applied according to the appropriate type
propType = queryObjProperty.property.columns[0].type
results = []
try:
if not isinstance(propType, String):
col_name = self.currentFieldName()
col = entity.columns[self.currentFieldName()]
if col.TYPE_INFO == 'LOOKUP':
lookup_entity = lookup_parent_entity(
self.curr_profile, col_name
)
lkp_model = entity_model(lookup_entity)
lkp_obj = lkp_model()
value_obj = getattr(
lkp_model, 'value'
)
result = lkp_obj.queryObject().filter(
func.lower(value_obj) == func.lower(search_term)
).first()
if result is None:
result = lkp_obj.queryObject().filter(
func.lower(value_obj).like(search_term + '%')
).first()
if result is not None:
results = modelQueryObj.filter(
queryObjProperty == result.id
).all()
else:
results = []
else:
results = modelQueryObj.filter(
func.lower(queryObjProperty) == func.lower(search_term)
).all()
if self.validity.isEnabled():
valid_str_ids = self.str_validity_period_filter(results)
else:
valid_str_ids = None
prog_dialog.setValue(7)
except exc.StatementError:
prog_dialog.deleteLater()
del prog_dialog
            return [], search_term
# if self.formatter is not None:
# self.formatter.setData(results)
# model_root_node = self.formatter.root(valid_str_ids)
prog_dialog.setValue(10)
prog_dialog.hide()
prog_dialog.deleteLater()
del prog_dialog
return results, search_term
def str_validity_period_filter(self, results):
"""
Filter the entity results using validity period in STR table.
:param results: Entity result
:type results: SQLAlchemy result proxy
:return: Valid list of STR ids
:rtype: List
"""
self.str_model_obj = self.str_model()
valid_str_ids = []
for result in results:
from_date = self.validity_from_date.date().toPyDate()
to_date = self.validity_to_date.date().toPyDate()
entity_id = '{}_id'.format(result.__table__.name[3:])
str_column_obj = getattr(self.str_model, entity_id)
str_result = self.str_model_obj.queryObject().filter(
self.str_model.validity_start >= from_date).filter(
self.str_model.validity_end <= to_date
).filter(str_column_obj == result.id).all()
for res in str_result:
valid_str_ids.append(res.id)
return valid_str_ids
def reset(self):
"""
Clear search input parameters.
"""
self.txtFilterPattern.clear()
if self.cboFilterCol.count() > 0:
self.cboFilterCol.setCurrentIndex(0)
def currentFieldName(self):
"""
Returns the name of the database field
from the current item in the combo box.
"""
curr_index = self.cboFilterCol.currentIndex()
field_name = self.cboFilterCol.itemData(curr_index)
if field_name is None:
return
else:
return field_name
def _searchTerm(self):
"""
Returns the search term specified by the user.
"""
return self.txtFilterPattern.text()
def _asyncFinished(self, model_values):
"""
Slot raised when worker has finished retrieving items.
"""
# Create QCompleter and add values to it.
self._update_completer(model_values)
self.asyncFinished.emit()
def _update_completer(self, values):
# Get the items in a tuple and put them in a list
# Store display and actual values in a
# model for easier mapping and
# retrieval when carrying out searches
model_attr_mapping = []
        # Check if there are formatters specified
# for the current field name
for mv in values:
f_model_values = []
m_val = mv[0]
if m_val is not None:
col_label = self.currentFieldName()
if col_label in self.config.LookupFormatters:
formatter = self.config.LookupFormatters[col_label]
if formatter.column.TYPE_INFO == 'LOOKUP':
m_val = formatter.code_value(m_val)[0]
else:
m_val = formatter.format_column_value(m_val)
f_model_values.extend([m_val, m_val])
model_attr_mapping.append(f_model_values)
self._completer_model = BaseSTDMTableModel(model_attr_mapping, ["", ""], self)
# We will use the QSortFilterProxyModel for filtering purposes
self._proxy_completer_model = QSortFilterProxyModel()
self._proxy_completer_model.setDynamicSortFilter(True)
self._proxy_completer_model.setSourceModel(self._completer_model)
self._proxy_completer_model.setSortCaseSensitivity(Qt.CaseInsensitive)
self._proxy_completer_model.setFilterKeyColumn(0)
# Configure completer
mod_completer = QCompleter(self._completer_model, self)
mod_completer.setCaseSensitivity(Qt.CaseInsensitive)
mod_completer.setCompletionMode(QCompleter.PopupCompletion)
mod_completer.setCompletionColumn(0)
mod_completer.setCompletionRole(Qt.DisplayRole)
self.txtFilterPattern.setCompleter(mod_completer)
    def _on_column_index_changed(self, index):
"""
Slot raised when the user selects a different filter column.
"""
self.txtFilterPattern.clear()
self.loadAsync()
class EntityConfiguration(object):
"""
Specifies the configuration to apply when creating
a new tab widget for performing entity searches.
"""
browseDescription = "Click on the browse button below to load entity " \
"records and their corresponding social tenure " \
"relationship definitions."
defaultFieldName = ""
# Format of each dictionary item:
# property/db column name - display name
filterColumns = OrderedDict()
displayColumns = OrderedDict()
groupBy = ""
STRModel = None
Title = ""
data_source_name = ""
# Functions for formatting values before
# they are loaded into the completer
LookupFormatters = {}
def __init__(self):
# Reset filter and display columns
self.filterColumns = OrderedDict()
self.displayColumns = OrderedDict()
class ModelWorker(QObject):
"""
Worker for retrieving model attribute
values stored in the database.
"""
retrieved = pyqtSignal(object)
error = pyqtSignal(str)
    @pyqtSlot(object, str)
def fetch(self, model, fieldname):
"""
Fetch attribute values from the
database for the specified model
and corresponding column name.
"""
try:
if hasattr(model, fieldname):
modelInstance = model()
obj_property = getattr(model, fieldname)
model_values = modelInstance.queryObject(
[obj_property]
).distinct()
self.retrieved.emit(model_values)
except DummyException as ex:
self.error.emit(str(ex))
| gpl-2.0 | -8,273,558,001,482,112,000 | 32.030441 | 120 | 0.562371 | false |
multikatt/beets | beetsplug/permissions.py | 1 | 3116 | from __future__ import (division, absolute_import, print_function,
unicode_literals)
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
permissions:
file: 644
dir: 755
"""
import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
def convert_perm(perm):
"""If the perm is a int it will first convert it to a string and back
to an oct int. Else it just converts it to oct.
"""
if isinstance(perm, int):
return int(bytes(perm), 8)
else:
return int(perm, 8)
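# Usage sketch (illustrative only): convert_perm() accepts the mode either as an
# int or as a string and returns the numeric octal mode expected by os.chmod(),
# e.g.
#     convert_perm(644)     # -> 420, i.e. 0o644, via int(bytes(644), 8)
#     convert_perm('755')   # -> 493, i.e. 0o755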
def check_permissions(path, permission):
"""Checks the permissions of a path.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path.
"""
return [ancestor
for ancestor in ancestry(item)
if ancestor.startswith(library)][1:]
class Permissions(BeetsPlugin):
def __init__(self):
super(Permissions, self).__init__()
# Adding defaults.
self.config.add({
u'file': 644,
u'dir': 755
})
self.register_listener('item_imported', permissions)
self.register_listener('album_imported', permissions)
def permissions(lib, item=None, album=None):
"""Running the permission fixer.
"""
# Getting the config.
file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get()
# Converts permissions to oct.
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
# Create chmod_queue.
file_chmod_queue = []
if item:
file_chmod_queue.append(item.path)
elif album:
for album_item in album.items():
file_chmod_queue.append(album_item.path)
# A set of directories to change permissions for.
dir_chmod_queue = set()
for path in file_chmod_queue:
# Changing permissions on the destination file.
os.chmod(util.bytestring_path(path), file_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), file_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
# Adding directories to the directory chmod queue.
dir_chmod_queue.update(
dirs_in_library(config['directory'].get(),
path))
# Change permissions for the directories.
for path in dir_chmod_queue:
        # Changing permissions on the destination directory.
os.chmod(util.bytestring_path(path), dir_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), dir_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
| mit | 8,238,867,990,123,123,000 | 29.851485 | 78 | 0.626765 | false |
gonadarian/kagen | kagen/khan.py | 1 | 1955 | import os
import csv
import json
import pymongo
from kagen import utils
from kagen.utils import config
from datetime import datetime
logger = utils.get_logger("khan")
def work():
khan = utils.get_conn_khan()
db = utils.get_conn_mongo()
dtf = "%Y-%m-%dT%H:%M:%SZ"
doc = utils.get_response_json(khan, "/api/v1/playlists")
for item in doc:
item["_id"] = item["id"]
for playlist in doc:
playlist["backup_timestamp"] = datetime.strptime(playlist["backup_timestamp"], dtf)
db.playlists.drop()
db.playlists.insert(doc)
logger.info("loaded {} items in playlists collection".format(len(doc)))
doc = utils.get_response_json(khan, "/api/v1/playlists/library")
db.playlists_library.drop()
db.playlists_library.insert(doc)
logger.info("loaded {} items in playlists_library collection".format(len(doc)))
doc = utils.get_response_json(khan, "/api/v1/playlists/library/list")
for playlist in doc:
playlist["_id"] = playlist["id"]
playlist["backup_timestamp"] = datetime.strptime(playlist["backup_timestamp"], dtf)
db.playlists_library_list.drop()
db.playlists_library_list.insert(doc)
logger.info("loaded {} items in playlists_library_list collection".format(len(doc)))
videos = []
ids = []
for playlist in doc:
for video in playlist["videos"]:
video_id = video["id"]
if video_id not in ids:
video["_id"] = video_id
videos.append(video)
ids.append(video_id)
video["date_added"] = datetime.strptime(video["date_added"], dtf)
video["backup_timestamp"] = datetime.strptime(video["backup_timestamp"], dtf)
db.video_list.drop()
db.video_list.insert(videos)
logger.info("loaded {} items in video_list collection".format(len(videos)))
@utils.entry_point
def main():
logger.info("START khan")
work()
logger.info("DONE khan")
| mit | 5,795,363,018,765,925,000 | 31.583333 | 91 | 0.640409 | false |
irvined1982/olweb-clients | bin/bkill.py | 1 | 3674 | #!/usr/bin/env python
# Copyright 2014 David Irvine
#
# This file is part of olwclients
#
# olwclients is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# olwclients is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with olwclients. If not, see <http://www.gnu.org/licenses/>.
import argparse
from olwclient import *
import getpass
import re
import sys
parser = argparse.ArgumentParser(description='Displays information about hosts')
OpenLavaConnection.configure_argument_list(parser)
parser.add_argument("-J", dest="job_name", default=None,
help="Operates only on jobs with the specified job_name. The -J option is ignored if a job ID \
other than 0 is specified in the job_ID option.")
parser.add_argument("-m", dest="host_name", default=None,
help="Operates only on jobs dispatched to the specified host or host group.")
parser.add_argument("-q", dest="queue_name", default=None,
help="Operates only on jobs in the specified queue.")
parser.add_argument("-u", dest="user_name", default=getpass.getuser(),
help="Operates only on jobs submitted by the specified user or user group (see bugroup(1)), or by \
all users if the reserved user name all is specified.")
parser.add_argument("job_ids", nargs='+', type=str, default=None,
help='Operates only on jobs that are specified by job_ID or "job_ID[index]", where \
"job_ID[index]" specifies selected job array elements (see bjobs(1)). For job arrays, quotation \
marks must enclose the job ID and index, and index must be enclosed in square brackets.')
parser.add_argument("-s", dest="signal", default="kill", choices=["kill", "suspend", "resume", "requeue"],
help="Sends the specified signal to specified jobs. Signals can be one of: kill, suspend, resume, \
requeue,")
args = parser.parse_args()
connection = OpenLavaConnection(args)
if 0 in args.job_ids or "0" in args.job_ids:
jobs = Job.get_job_list(connection,
user_name=args.user_name,
host_name=args.host_name,
queue_name=args.queue_name,
job_name=args.job_name,
)
else:
jobs = []
for job_id in args.job_ids:
try:
jid = int(job_id)
aid = 0
except ValueError:
            match = re.search(r'(\d+)\[(\d+)\]', job_id)
            if match:
                jid = int(match.group(1))
                aid = int(match.group(2))
else:
print "Invalid job id: %s" % job_id
sys.exit(1)
jobs.append(Job(connection, job_id=jid, array_index=aid))
try:
for job in jobs:
try:
print "Sending %s signal to job: %s[%s]" % (args.signal, job.job_id, job.array_index)
getattr(job, args.signal)()
except PermissionDeniedError, e:
print "Unable to perform action on job: %s[%s]: %s" % (job.job_id, job.array_index, e.message)
except RemoteServerError, e:
print "Unable to display job information: %s" % e.message
sys.exit(1)
| gpl-2.0 | 4,289,308,900,084,577,300 | 42.223529 | 119 | 0.619488 | false |
panosl/helios | helios/orders/forms.py | 1 | 1114 | from django import forms
from helios.shipping.models import ShippingMethodRegions
class ShippingChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return u'%s, %s - %s' % (obj.method.name, obj.method.shipper, obj.cost)
# todo this needs to be handled either here
# or in the checkout view in the store app
class ShippingOrderForm(forms.Form):
def __init__(self, customer, *args, **kwargs):
super(ShippingOrderForm, self).__init__(*args, **kwargs)
methods = [region.shippingmethodregions_set.all()
for region in customer.country.shippingregion_set.all()]
methods = [method[0] for method in methods]
self.fields['shipping_choice'].queryset = ShippingMethodRegions.objects.filter(id__in=[method.id for method in methods])
shipping_choice = ShippingChoiceField(
queryset=ShippingMethodRegions.objects.all(),
empty_label=None,
widget=forms.RadioSelect(attrs={
'class': 'order',
'onclick': '$("#shipping_choice").submit()',
})
)
class OrderForm(forms.Form):
pass
| bsd-3-clause | 1,921,637,111,047,108,400 | 34.935484 | 128 | 0.670557 | false |
buaawp/pums | mock/migrations/0001_initial.py | 1 | 3879 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='LtMockModule',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default='module', max_length=128)),
('description', models.CharField(max_length=1024, blank=True)),
],
),
migrations.CreateModel(
name='LtMockProject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default='project', max_length=128)),
('description', models.CharField(max_length=1024, blank=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='LtMockRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default='defaultName', max_length=128)),
('method', models.CharField(default='GET', max_length=20)),
('address', models.CharField(default='defaultUrl', max_length=2048)),
('params', models.CharField(max_length=1648, blank=True)),
('module', models.ForeignKey(to='mock.LtMockModule')),
('project', models.ForeignKey(to='mock.LtMockProject')),
],
),
migrations.CreateModel(
name='LtMockRequestHeader',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(default='defaultkey', max_length=128)),
('value', models.CharField(max_length=1024, blank=True)),
],
),
migrations.CreateModel(
name='LtMockRequestParam',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(default='defaultkey', max_length=128)),
('value', models.CharField(max_length=1024, blank=True)),
],
),
migrations.CreateModel(
name='LtMockResponse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default='defaultresponse', max_length=128)),
('template', models.CharField(max_length=2048, blank=True)),
('sample', models.CharField(max_length=2048, blank=True)),
],
),
migrations.AddField(
model_name='ltmockrequest',
name='requestheader',
field=models.ForeignKey(blank=True, to='mock.LtMockRequestHeader', null=True),
),
migrations.AddField(
model_name='ltmockrequest',
name='requestparam',
field=models.ForeignKey(blank=True, to='mock.LtMockRequestParam', null=True),
),
migrations.AddField(
model_name='ltmockrequest',
name='response',
field=models.ForeignKey(blank=True, to='mock.LtMockResponse', null=True),
),
migrations.AddField(
model_name='ltmockmodule',
name='project',
field=models.ForeignKey(to='mock.LtMockProject'),
),
]
| mit | 6,251,896,528,812,043,000 | 42.58427 | 114 | 0.564836 | false |
cossatot/culpable | culpable/magnitudes.py | 1 | 22751 | import numpy as np
from .stats import Pdf, pdf_from_samples, multiply_pdfs, divide_pdfs
"""
Scaling relationships and related equations for earthquake magnitude
calculations.
"""
"""
Normalized slip distribution from Biasi and Weldon, 2006
"""
Dn_x = np.array(
[ 0. , 0.03852144, 0.07704287, 0.11556431, 0.15408574,
0.19260718, 0.23112861, 0.26965005, 0.30817149, 0.34669292,
0.38521436, 0.42373579, 0.46225723, 0.50077866, 0.5393001 ,
0.57782153, 0.61634297, 0.65486441, 0.69338584, 0.73190728,
0.77042871, 0.80895015, 0.84747158, 0.88599302, 0.92451446,
0.96303589, 1.00155733, 1.04007876, 1.0786002 , 1.11712163,
1.15564307, 1.19416451, 1.23268594, 1.27120738, 1.30972881,
1.34825025, 1.38677168, 1.42529312, 1.46381456, 1.50233599,
1.54085743, 1.57937886, 1.6179003 , 1.65642173, 1.69494317,
1.7334646 , 1.77198604, 1.81050748, 1.84902891, 1.88755035,
1.92607178, 1.96459322, 2.00311465, 2.04163609, 2.08015753,
2.11867896, 2.1572004 , 2.19572183, 2.23424327, 2.2727647 ,
2.31128614, 2.34980758, 2.38832901, 2.42685045, 2.46537188,
2.50389332, 2.54241475, 2.58093619, 2.61945762, 2.65797906,
2.6965005 , 2.73502193, 2.77354337, 2.8120648 , 2.85058624,
2.88910767, 2.92762911, 2.96615055, 3.00467198, 3.04319342,
3.08171485, 3.12023629, 3.15875772, 3.19727916, 3.2358006 ,
3.27432203, 3.31284347, 3.3513649 , 3.38988634, 3.42840777,
3.46692921, 3.50545064, 3.54397208, 3.58249352, 3.62101495,
3.65953639, 3.69805782, 3.73657926, 3.77510069, 3.81362213])
Dn_y = np.array(
[ 3.56431234e-01, 4.07514412e-01, 4.49469325e-01, 4.80250978e-01,
4.99600050e-01, 5.08967345e-01, 5.11056831e-01, 5.09135209e-01,
5.06305810e-01, 5.04929021e-01, 5.06305202e-01, 5.10647854e-01,
5.17294850e-01, 5.25056042e-01, 5.32585263e-01, 5.38688051e-01,
5.42518154e-01, 5.43657945e-01, 5.42107125e-01, 5.38215229e-01,
5.32589131e-01, 5.25993774e-01, 5.19250549e-01, 5.13129949e-01,
5.08236899e-01, 5.04898081e-01, 5.03074847e-01, 5.02334004e-01,
5.01903866e-01, 5.00822254e-01, 4.98152675e-01, 4.93216557e-01,
4.85776256e-01, 4.76112653e-01, 4.64970884e-01, 4.53387277e-01,
4.42445033e-01, 4.33023117e-01, 4.25598012e-01, 4.20136711e-01,
4.16092401e-01, 4.12492219e-01, 4.08093894e-01, 4.01583982e-01,
3.91790171e-01, 3.77880214e-01, 3.59519131e-01, 3.36956396e-01,
3.11019404e-01, 2.83002312e-01, 2.54461304e-01, 2.26954105e-01,
2.01783046e-01, 1.79805426e-01, 1.61356306e-01, 1.46292387e-01,
1.34126853e-01, 1.24201482e-01, 1.15842979e-01, 1.08470898e-01,
1.01650879e-01, 9.51051805e-02, 8.86970782e-02, 8.24006991e-02,
7.62618151e-02, 7.03540397e-02, 6.47382510e-02, 5.94357659e-02,
5.44230300e-02, 4.96471997e-02, 4.50527124e-02, 4.06047119e-02,
3.62987575e-02, 3.21550847e-02, 2.82040784e-02, 2.44727150e-02,
2.09786579e-02, 1.77325398e-02, 1.47440829e-02, 1.20266593e-02,
9.59725861e-03, 7.47225770e-03, 5.66159378e-03, 4.16411755e-03,
2.96568107e-03, 2.04006393e-03, 1.35194170e-03, 8.60866657e-04,
5.25372416e-04, 3.06545806e-04, 1.70626053e-04, 9.04155999e-05,
4.55329491e-05, 2.17590136e-05, 9.85449333e-06, 4.22528115e-06,
1.71367970e-06, 6.56980895e-07, 2.37946616e-07, 8.13790788e-08])
Dn = Pdf(Dn_x, Dn_y)
Dn_sb = multiply_pdfs(Dn, Pdf([Dn_x.min(), Dn_x.max()],
[Dn_x.min(), Dn_x.max()]))
"""
Probability distribution for an earthquake breaking the surface given
Gutenberg-Richter prior; to be used as a p(M) prior for paleoseismic magnitudes
from Biasi and Weldon 2006
"""
gr_pm_x = [5.000, 5.001, 5.057, 5.097, 5.192, 5.300, 5.392, 5.499, 5.597,
5.753, 5.922, 6.021, 6.211, 6.353, 6.533, 6.604, 6.771, 6.999,
7.280, 7.507, 7.726, 7.953, 8.182]
gr_pm_y = [0.000, 0.030, 0.050, 0.063, 0.081, 0.089, 0.089, 0.085, 0.079,
0.067, 0.054, 0.047, 0.035, 0.027, 0.020, 0.018, 0.013, 0.008,
0.005, 0.003, 0.002, 9.785e-4, 0.00]
"""
Conversion functions
"""
def _exp_10(x):
return 10**x
log_fn = {'e': np.log,
'10': np.log10}
exp_fn = {'e': np.exp,
'10': _exp_10}
M_from_D_coeffs = {'BW_2006': {'a': 6.94,
'b': 1.14,
'log_base': '10'},
# WC_1994 are for Average Displacement, not max.
'WC_1994_all': {'a': 6.93,
'b': 0.82,
'log_base': '10'},
'WC_1994_SS': {'a': 7.04,
'b': 0.89,
'log_base': '10'},
'WC_1994_R': {'a': 6.64,
'b': 0.13,
'log_base': '10'},
'WC_1994_N': {'a': 6.78,
'b': 0.65,
'log_base': '10'},
}
M_from_L_coeffs = {'Stirling_2002_instr': {'a': 5.45,
'a_err': 0.08,
'b': 0.95,
'b_err': 0.06,
'log_base': '10'},
'Stirling_2002_pre_instr': {'a': 5.89,
'a_err': 0.11,
'b': 0.79,
'b_err': 0.06,
'log_base': '10'},
'WC_1994_all': {'a': 5.08,
'a_err': 0.1,
'b': 1.16,
'b_err': 0.07,
'log_base': '10'},
'WC_1994_SS': {'a': 5.16,
'a_err': 0.13,
'b': 1.12,
'b_err': 0.08,
'log_base': '10'},
'WC_1994_R': {'a': 5.00,
'a_err': 0.22,
'b': 1.22,
'b_err': 0.16,
'log_base': '10'},
'WC_1994_N': {'a': 4.86,
'a_err': 0.34,
'b': 1.32,
'b_err': 0.26,
'log_base': '10'},
}
def M_from_D(D, ref='BW_2006', a=None, b=None, base='e'):
"""
Moment magnitude from displacement, using the specified scaling
    (keyword 'ref', or parameters 'a', 'b', and 'base').
General relationship is M = a + b * log(D).
Parameters
----------
D : Scalar or vector values for displacement (in meters)
ref : string indicating scaling relationship.
'BW_2006' is Biasi and Weldon (2006) (default).
'WC_1994_all' is Wells and Coppersmith (1994) for all events.
'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events.
'WC_1994_R' is Wells and Coppersmith (1994) for reverse events.
'WC_1994_N' is Wells and Coppersmith (1994) for normal events.
`ref=None` will allow you to enter your own coefficients and base.
a : Scalar, or vector of same length as D.
b : Scalar, or vector of same length as D.
base : String, base for logarithm, default 'e'.
'e' is natural log.
'10' is log10.
Returns
-------
M : Scalar or vector of calculated magnitude, with shape of D.
"""
if ref is not None:
# consider warning if ref is not None and a, b, log are inputs
a = M_from_D_coeffs[ref]['a']
b = M_from_D_coeffs[ref]['b']
base = M_from_D_coeffs[ref]['log_base']
else:
pass
return a + b * log_fn[base](D)
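# Usage sketch (illustrative only; values are approximate). With the BW_2006
# coefficients above, M = 6.94 + 1.14 * log10(D), so for example:
#     M_from_D(2.0, ref='BW_2006')                        # ~7.28 for a 2 m offset
#     M_from_D(2.0, ref=None, a=6.93, b=0.82, base='10')  # ~7.18 with custom coefficients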
def D_from_M(M, ref='BW_2006', a=None, b=None, base='e'):
"""
Moment magnitude from displacement, using the specified scaling
    (keyword 'ref', or parameters 'a', 'b', and 'base').
General relationship is D = base ** ((M - a) / b)
Parameters
----------
M : Scalar or vector values for moment magnitude
ref : string indicating scaling relationship.
'BW_2006' is Biasi and Weldon (2006) (default).
'WC_1994_all' is Wells and Coppersmith (1994) for all events.
'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events.
'WC_1994_R' is Wells and Coppersmith (1994) for reverse events.
'WC_1994_N' is Wells and Coppersmith (1994) for normal events.
`ref=None` will allow you to enter your own coefficients and base.
a : Scalar, or vector of same length as M.
b : Scalar, or vector of same length as M.
base : String, base for exponent, default 'e'.
'e' is e.
'10' is 10.
Returns
-------
D : Scalar or vector of calculated displacement (in meters),
with shape of M.
"""
if ref is not None:
a = M_from_D_coeffs[ref]['a']
b = M_from_D_coeffs[ref]['b']
base = M_from_D_coeffs[ref]['log_base']
return exp_fn[base]((M - a) / b)
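# Usage sketch (illustrative only; values are approximate). D_from_M inverts
# M_from_D, so with the BW_2006 coefficients:
#     D_from_M(7.0, ref='BW_2006')    # ~1.13 m, i.e. 10 ** ((7.0 - 6.94) / 1.14)
#     D_from_M(M_from_D(2.0))         # round-trips back to ~2.0 m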
def M_from_L(L, ref='Stirling_2002_instr', unit='km', a=None, b=None, base='e',
a_err=None, b_err=None, mc=False):
"""
Moment magnitude from length, using the specified scaling
    (keyword 'ref', or parameters 'a', 'b', and 'base').
    General relationship is M = a + b * log(L).
Parameters
----------
    L : Scalar or vector values for rupture length (in km unless unit='m')
ref : string indicating scaling relationship.
'Stirling_2002_instr' is from Stirling et al. 2002, instrumental data.
'WC_1994_all' is Wells and Coppersmith (1994) for all events.
'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events.
'WC_1994_R' is Wells and Coppersmith (1994) for reverse events.
'WC_1994_N' is Wells and Coppersmith (1994) for normal events.
`ref=None` will allow you to enter your own coefficients and base.
unit : Unit of length measure. Default is 'km'. 'm' also works.
    a : Scalar, or vector of same length as L.
    a_err : Standard error of `a`. Scalar.
    b : Scalar, or vector of same length as L.
    b_err : Standard error of `b`. Scalar.
    base : String, base for logarithm, default 'e'.
'e' is natural log.
'10' is log10.
mc : Boolean that indicates whether to sample the coefficents a and b
including uncertainties `a_err` and `b_err` through Monte Carlo
techniques.
Returns
-------
M : Scalar or vector of calculated magnitude, with shape of L.
"""
# unit conversion
if unit == 'm':
        L = L / 1000.
if ref is not None:
a = M_from_L_coeffs[ref]['a']
b = M_from_L_coeffs[ref]['b']
base = M_from_L_coeffs[ref]['log_base']
try:
a_err = M_from_L_coeffs[ref]['a_err']
b_err = M_from_L_coeffs[ref]['b_err']
except KeyError:
pass
if mc == True:
A = a if a_err is None else np.random.normal(a, a_err, len(L))
B = b if b_err is None else np.random.normal(b, b_err, len(L))
else:
A = a
B = b
return A + B * log_fn[base](L)
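# Usage sketch (illustrative only; values are approximate). Lengths are in km
# unless unit='m' is passed. With mc=True the coefficient errors are sampled via
# np.random.normal(..., len(L)), so L should then be a vector of length samples:
#     M_from_L(30., ref='WC_1994_all', mc=False)                # ~6.79 = 5.08 + 1.16 * log10(30)
#     M_from_L(np.full(1000, 30.), ref='WC_1994_all', mc=True)  # vector of sampled magnitudes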
"""
Estimation functions
"""
def p_D_M(D, M, ref='BW_2006', sample_bias_corr=False):
"""
    Likelihood of an observed displacement D given magnitude M, as defined by
    Biasi and Weldon (2006).
Parameters
----------
D : Scalar or array of displacement values (in meters).
M : Scalar or array of magnitudes.
ref: Displacement-magnitude scaling reference (string).
'BW_2006' is Biasi and Weldon (2006).
'WC_1994_all' is Wells and Coppersmith (1994).
Returns
-------
p_D_M : Calculated likelihood. If scalar, simply returns the likelihood.
If not, returns an improper pdf (a `culpable.stats.Pdf`) which
is an interpolation class. Actual likelihoods are `p_D_M.y`, and
corresponding magnitudes (i.e. the prior p_M) are `p_D_M.x`.
"""
D_ave = D_from_M(M, ref=ref)
D = np.abs(D)
if sample_bias_corr == True:
Dn_ = Dn_sb
else:
Dn_ = Dn
if np.isscalar(D):
D_score = D / D_ave
p_D_M = Dn_(D_score)
else:
D_score = np.array([d / D_ave for d in D])
p_D_M = Dn_(D_score)
p_D_M = np.mean(p_D_M, axis=0)
if np.isscalar(p_D_M):
p_D_M = np.float(p_D_M)
else:
p_D_M = Pdf(M, p_D_M, normalize=True)
return p_D_M
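# Usage sketch (illustrative only). Given candidate magnitudes, p_D_M returns the
# likelihood of the measured offset(s) at a random point along the rupture:
#     p_D_M(1.5, np.arange(6.0, 8.01, 0.1))                        # Pdf over M for one 1.5 m offset
#     p_D_M(np.array([1.2, 1.5, 1.8]), np.arange(6.0, 8.01, 0.1))  # offset samples are averaged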
def _make_p_M_x(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None):
"""
Makes the X values (i.e., the magnitudes) for a p_M distribution.
"""
if n_M is not None:
p_M_x = np.linspace(p_M_min, p_M_max, num=n_M)
else:
if M_step is None:
M_step = 0.1 # in case it's passed as None from another function
p_M_x = np.arange(p_M_min, p_M_max + M_step, M_step)
return p_M_x
def make_p_M_uniform(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None):
"""
Creates a uniform PDF between the minimum and maximum magnitudes given
by p_M_min and p_M_max.
Parameters
----------
p_M_min : Minimum magnitude.
p_M_max : Maximum magnitude.
M_step : Width of steps in interpolation (no effect on final results).
n_M : number of points in interpolation (no effect on final results).
Returns
-------
p_M : Pdf function with a uniform distribution between p_M_min and p_M_max
"""
p_M_x = _make_p_M_x(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step,
n_M=n_M)
return Pdf(p_M_x, np.ones(len(p_M_x)) * 1 / len(p_M_x))
def make_p_M_gr_surface_break(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None):
"""
Creates a PDF based on a Gutenberg-Richter distribution that is then
modified to account for the decreasing likelihood of surface rupture
with decreasing magnitude (distribution from Biasi and Weldon 2006,
figure 8b.
Returns:
--------
p_M : Pdf class with a modified Gutenberg-Richter distribution.
"""
p_M_x = _make_p_M_x(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step,
n_M=n_M)
p_M_gr_sb = Pdf(gr_pm_x, gr_pm_y)
p_M_gr_sb_y = p_M_gr_sb(p_M_x)
return Pdf(p_M_x, p_M_gr_sb_y)
def make_p_M(p_M_type='uniform', p_M_min=None, p_M_max=None, M_step=None,
n_M=None):
"""
Creates the a PDF of magnitudes to use as the prior p(M).
Parameters
----------
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum magnitude.
p_M_max : Maximum magnitude.
M_step : Width of steps in interpolation (no effect on final results).
n_M : number of points in interpolation (no effect on final results).
Returns
-------
p_M : Pdf function with a uniform distribution between p_M_min and p_M_max
"""
if p_M_type == 'uniform':
p_M = make_p_M_uniform(p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
elif p_M_type == 'GR_surface_break':
p_M = make_p_M_gr_surface_break(p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
return p_M
def p_M_D(D, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None,
ref='BW_2006', p_M_type='uniform', sample_bias_corr=False):
"""
Calculates p(M|D), the posterior probability of an earthquake having a
magnitude of M given observed displacement D, based on Biasi and Weldon
2006 (but with optional sample bias correction).
Either a `p_M` Pdf object should be passed, or the additional parameters
necessary to construct one; see `make_p_M`.
Parameters
----------
D : Scalar or vector of displacements in meters (floats).
p_M : Prior magnitude distribution p(M), in the Pdf class from
culpable.stats.
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum prior magnitude; only needed if `p_M` is not given.
p_M_max : Maximum prior magnitude; only needed if `p_M` is not given.
M_step : Spacing for `p_M`; only needed if `p_M` is not given.
n_M : number of points for `p_M`; only needed if `p_M` is not given.
ref : Reference for magnitude-displacement scaling relationships. See
`M_from_D` for a list of implemented relationships.
    sample_bias_corr : Boolean indicating whether to correct for
                       preferential sampling of scarps proportionally
                       to the offset at a point relative to the min
                       and max offsets.
Returns
------
p_M_D : Pdf function of the posterior magnitude estimation p(M|D).
"""
if p_M is None:
p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
else:
#TODO: maybe add some logic for dealing with non `Pdf` priors
pass
p_D = Pdf(p_M.x, [np.trapz(Dn_y, Dn_x * D_from_M(M, ref=ref))
for M in p_M.x])
p_D_M_ = p_D_M(D, p_M.x, ref=ref, sample_bias_corr=sample_bias_corr)
p_M_D_ = multiply_pdfs(p_M, p_D_M_, step=M_step)
p_M_D_ = divide_pdfs(p_M_D_, p_D, step=M_step)
return p_M_D_
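# Usage sketch (illustrative only). If no prior Pdf is passed, a uniform prior is
# built on the fly from the bounds given here:
#     post = p_M_D(1.5, p_M_min=6.0, p_M_max=8.0, M_step=0.05)
#     post = p_M_D(np.array([1.2, 1.5, 1.8]), p_M_min=6.0, p_M_max=8.0,
#                  M_step=0.05, sample_bias_corr=True)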
def p_M_L(L, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None,
p_M_type='uniform', ref='WC_1994_all', mc=True):
"""
Calculates p(M|L), the posterior probability of an earthquake having a
magnitude of M given observed length L.
Either a `p_M` Pdf object should be passed, or the additional parameters
necessary to construct one; see `make_p_M`.
Parameters
----------
L : Scalar or vector of lengths in kilometers (floats).
p_M : Prior magnitude distribution p(M), in the Pdf class from
culpable.stats.
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum prior magnitude; only needed if `p_M` is not given.
p_M_max : Maximum prior magnitude; only needed if `p_M` is not given.
M_step : Spacing for `p_M`; only needed if `p_M` is not given.
n_M : number of points for `p_M`; only needed if `p_M` is not given.
ref : Reference for magnitude-length scaling relationships. See `M_from_L`
for a list of implemented relationships.
mc : Boolean that describes whether to propagate the uncertainty (standard
errors) in the scaling relationship to the posterior using a Monte
Carlo simulation.
Returns
------
    p_M_L : Pdf function of the posterior magnitude estimation p(M|L).
"""
if p_M is None:
p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
p_M_L_samples = M_from_L(L, ref=ref, mc=mc)
p_M_L_ = pdf_from_samples(p_M_L_samples, x_min=p_M.x.min(),
x_max=p_M.x.max())
p_M_L_ = multiply_pdfs(p_M, p_M_L_)
return p_M_L_
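# Usage sketch (illustrative only; the 40 +/- 5 km rupture is hypothetical).
# Because the scaling-coefficient errors are sampled when mc=True, pass L as a
# vector of length samples in km:
#     L_samples = np.random.normal(40., 5., 2000)
#     post = p_M_L(L_samples, p_M_min=6.0, p_M_max=8.0, M_step=0.05, ref='WC_1994_all')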
def p_M_DL(D, L, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None,
p_M_type='uniform', D_ref='BW_2006', L_ref='WC_1994_all',
L_mc=True, sample_bias_corr=False):
"""
Calculates p(M|D,L), the posterior probability of an earthquake having a
magnitude of M given observed offset/displacement D and rupture length L.
Either a `p_M` Pdf object should be passed, or the additional parameters
necessary to construct one; see `make_p_M`.
Parameters
----------
D : Scalar or vector of displacement in meters (floats).
L : Scalar or vector of lengths in kilometers (floats).
p_M : Prior magnitude distribution p(M), in the Pdf class from
culpable.stats.
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum prior magnitude; only needed if `p_M` is not given.
    p_M_max : Maximum prior magnitude; only needed if `p_M` is not given.
M_step : Spacing for `p_M`; only needed if `p_M` is not given.
n_M : number of points for `p_M`; only needed if `p_M` is not given.
D_ref : Reference for magnitude-displacement scaling relationships. See
`M_from_D` for a list of implemented relationships.
L_ref : Reference for magnitude-length scaling relationships. See
`M_from_L` for a list of implemented relationships.
mc : Boolean that describes whether to propagate the uncertainty (standard
errors) in the scaling relationship to the posterior using a Monte
Carlo simulation.
    sample_bias_corr : Boolean indicating whether to correct for
                       preferential sampling of scarps proportionally
                       to the offset at a point relative to the min
                       and max offsets.
Returns
------
    p_M_DL : Pdf function of the posterior magnitude estimation p(M|D,L).
"""
if p_M is None:
p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
p_M_D_ = p_M_D(D, p_M, ref=D_ref, sample_bias_corr=sample_bias_corr)
p_M_L_samples = M_from_L(L, ref=L_ref, mc=L_mc)
p_M_L_ = pdf_from_samples(p_M_L_samples, x_min=p_M.x.min(),
x_max=p_M.x.max())
return multiply_pdfs(p_M_L_, p_M_D_)
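# Usage sketch (illustrative only; the offset and length samples are hypothetical).
# p_M_DL combines the displacement and length constraints under a single prior:
#     D_samples = np.random.normal(1.5, 0.3, 2000)   # offset samples in meters
#     L_samples = np.random.normal(40., 5., 2000)    # length samples in km
#     post = p_M_DL(D_samples, L_samples, p_M_min=6.0, p_M_max=8.0, M_step=0.05)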
| mit | 3,321,407,271,756,784,000 | 33.628615 | 79 | 0.551976 | false |
huwiki/featured-feeds | rsslib.py | 1 | 4281 | #!/usr/bin/python
# -*- coding: iso-8859-2 -*-
import sys, os
import re, string
import time, datetime, calendar, locale
import urllib
import cPickle
import xml.sax.saxutils
locale.setlocale(locale.LC_TIME, 'en_GB')
currenttimestamp = time.strftime(u'%a, %d %b %Y %H:%M:%S +0000', time.gmtime())
locale.setlocale(locale.LC_TIME, 'hu_HU')
# general settings
settings = {
'rss_webmaster': u'[email protected] (Tisza Gergõ)',
'program_name': 'WpFeedMaker',
'program_version': '1.0',
'program_contact': '[email protected]',
}
# helpers
def encode_title(s):
s = s[0:1].upper() + s[1:]
s = re.sub(' ', '_', s)
return urllib.quote(s.encode('utf-8'))
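# For example, encode_title(u'kezdő lap') upper-cases the first character,
# replaces spaces with underscores and percent-encodes the UTF-8 bytes,
# giving 'Kezd%C5%91_lap'.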
def date_vars(date, extend = {}):
if date.isocalendar()[2] < 4:
n = 1
else:
n = 2
iso = date.isocalendar()
dict = {
'year': iso[0],
'years1': iso[0] % 5,
'years2': iso[0] % 5 + 5,
'month': date.month,
'monthname': calendar.month_name[date.month].decode('iso-8859-2'),
'day' : date.day,
'week': iso[1],
'dow' : iso[2],
'n' : n,
}
dict.update(extend)
return dict
# Subclassing of URLopener - sets "User-agent: ", which Wikipedia requires to be set
# to something else than the default "Python-urllib"
class MyURLopener(urllib.URLopener):
version = settings['program_name'] + "/" + settings['program_version'] + " " + settings['program_contact']
# Caching of HTML from Wikipedia
class CacheItem:
def __init__(self, html, date, fetchtime):
self.html = html
self.date = date
self.fetchtime = fetchtime
class WPCache:
def __init__(self, settings):
self.settings = settings
self.url_opener = MyURLopener()
self.filename = self.settings['cache_filename']
if (os.path.exists(self.filename)):
file = open(self.filename)
self.cache = cPickle.load(file)
file.close()
else:
self.cache = {}
def get_html(self, url, date):
if url in self.cache:
return self.cache[url].html
else:
html = self.url_opener.open(url).read()
cacheitem = CacheItem(html, date, time.gmtime())
self.cache[url] = cacheitem
return html
# Weed out old entries, so cache doesn't get big
def too_old(self, date):
return (datetime.date.today() - date).days > self.settings['time_range']
def weed_out_old(self):
self.cache = dict([x for x in self.cache.items() if not self.too_old(x[1].date)])
def save(self):
self.weed_out_old()
file = open(self.filename, "w")
p = cPickle.Pickler(file)
p.dump(self.cache)
class WPFeed:
def __init__(self, settings):
self.settings = settings
self.cache = WPCache(self.settings)
def get_html(self, url, date, clean = True):
html = self.cache.get_html(url, date)
if clean:
html = re.sub('\s*<!--[\s\S]*?-->\s*', '', html)
return html
def rss_item(self, item):
return """<item>
<title>%(title)s</title>
<link>%(url)s</link>
<guid isPermaLink="true">%(url)s</guid>
<description>%(escaped_content)s</description>
</item>
""" % {
'title': xml.sax.saxutils.escape(item['title']),
'url': item['url'],
'escaped_content': xml.sax.saxutils.escape(item['content']),
}
def rss(self, items):
self.xml = """<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:blogChannel="http://backend.userland.com/blogChannelModule">
<channel>
<title>%(rss_title)s</title>
<link>%(rss_link)s</link>
<description>%(rss_description)s</description>
<language>hu</language>
<copyright>CC-BY-SA-3.0</copyright>
<lastBuildDate>%(build_date)s</lastBuildDate>
<docs>http://blogs.law.harvard.edu/tech/rss</docs>
<webMaster>%(webmaster)s</webMaster>
<generator>%(generator)s</generator>
%(items)s
</channel>
</rss>
""" % {
'rss_title': self.settings['rss_title'],
'rss_link': self.settings['rss_link'],
'rss_description': self.settings['rss_description'],
'webmaster': settings['rss_webmaster'],
'build_date': currenttimestamp,
'items': '\n'.join(map(self.rss_item, items)),
'generator': settings['program_name'] + ' ' + settings['program_version'],
}
def save(self):
file = open(self.settings['output_filename'], "w")
file.write(self.xml.encode('utf-8'))
file.close()
self.cache.save()
def main():
print "This file cannot be invoked directly"
sys.exit(1)
if __name__ == '__main__':
main()
| mit | -2,498,234,806,636,101,000 | 25.103659 | 107 | 0.640972 | false |
mgedmin/objgraph | objgraph.py | 1 | 43531 | """
Tools for drawing Python object reference graphs with graphviz.
You can find documentation online at https://mg.pov.lt/objgraph/
Copyright (c) 2008-2017 Marius Gedminas <[email protected]> and contributors
Released under the MIT licence.
"""
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import codecs
import collections
import gc
import inspect
import itertools
import operator
import os
import re
import subprocess
import sys
import tempfile
import types
try:
# Python 2.x compatibility
from StringIO import StringIO
except ImportError: # pragma: PY3
from io import StringIO
try:
from types import InstanceType
except ImportError: # pragma: PY3
# Python 3.x compatibility
InstanceType = None
__author__ = "Marius Gedminas ([email protected])"
__copyright__ = "Copyright (c) 2008-2017 Marius Gedminas and contributors"
__license__ = "MIT"
__version__ = '3.5.1.dev0'
__date__ = '2020-10-11'
try:
basestring
except NameError: # pragma: PY3
# Python 3.x compatibility
basestring = str
try:
iteritems = dict.iteritems
except AttributeError: # pragma: PY3
# Python 3.x compatibility
iteritems = dict.items
IS_INTERACTIVE = False
try: # pragma: nocover
import graphviz
if 'TerminalInteractiveShell' not in get_ipython().__class__.__name__:
# So far I know two shells where it's inappropriate to use inline
# graphics, because they're text only:
# - ipython uses a TerminalInteractiveShell
# - pycharm's console uses PyDevTerminalInteractiveShell
IS_INTERACTIVE = True
except (NameError, ImportError):
pass
def _isinstance(object, classinfo):
"""Return whether an object is an instance of a class or its subclass.
Differs from the builtin isinstance() implementation in that it does not
depend on the ``__class__`` attribute which is proxied by
mock.Mock(spec=...).
"""
return issubclass(type(object), classinfo)
def count(typename, objects=None):
"""Count objects tracked by the garbage collector with a given class name.
The class name can optionally be fully qualified.
Example:
>>> count('dict')
42
>>> count('mymodule.MyClass')
2
.. note::
The Python garbage collector does not track simple
objects like int or str. See
https://docs.python.org/3/library/gc.html#gc.is_tracked
for more information.
Instead of looking through all objects tracked by the GC, you may
specify your own collection, e.g.
>>> count('MyClass', get_leaking_objects())
3
See also: :func:`get_leaking_objects`.
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
Accepts fully-qualified type names (i.e. 'package.module.ClassName')
as well as short type names (i.e. 'ClassName').
"""
if objects is None:
objects = gc.get_objects()
try:
if '.' in typename:
return sum(1 for o in objects if _long_typename(o) == typename)
else:
return sum(1 for o in objects if _short_typename(o) == typename)
finally:
del objects # clear cyclic references to frame
def typestats(objects=None, shortnames=True, filter=None):
"""Count the number of instances for each type tracked by the GC.
Note that the GC does not track simple objects like int or str.
Note that classes with the same name but defined in different modules
will be lumped together if ``shortnames`` is True.
If ``filter`` is specified, it should be a function taking one argument and
returning a boolean. Objects for which ``filter(obj)`` returns ``False``
will be ignored.
Example:
>>> typestats()
{'list': 12041, 'tuple': 10245, ...}
>>> typestats(get_leaking_objects())
{'MemoryError': 1, 'tuple': 2795, 'RuntimeError': 1, 'list': 47, ...}
.. versionadded:: 1.1
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 3.1.3
New parameter: ``filter``.
"""
if objects is None:
objects = gc.get_objects()
try:
if shortnames:
typename = _short_typename
else:
typename = _long_typename
stats = {}
for o in objects:
if filter and not filter(o):
continue
n = typename(o)
stats[n] = stats.get(n, 0) + 1
return stats
finally:
del objects # clear cyclic references to frame
def most_common_types(limit=10, objects=None, shortnames=True, filter=None):
"""Count the names of types with the most instances.
Returns a list of (type_name, count), sorted most-frequent-first.
Limits the return value to at most ``limit`` items. You may set ``limit``
to None to avoid that.
If ``filter`` is specified, it should be a function taking one argument and
returning a boolean. Objects for which ``filter(obj)`` returns ``False``
will be ignored.
The caveats documented in :func:`typestats` apply.
Example:
>>> most_common_types(limit=2)
[('list', 12041), ('tuple', 10245)]
.. versionadded:: 1.4
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 3.1.3
New parameter: ``filter``.
"""
stats = sorted(
typestats(objects, shortnames=shortnames, filter=filter).items(),
key=operator.itemgetter(1), reverse=True)
if limit:
stats = stats[:limit]
return stats
def show_most_common_types(
limit=10,
objects=None,
shortnames=True,
file=None,
filter=None):
"""Print the table of types of most common instances.
If ``filter`` is specified, it should be a function taking one argument and
returning a boolean. Objects for which ``filter(obj)`` returns ``False``
will be ignored.
The caveats documented in :func:`typestats` apply.
Example:
>>> show_most_common_types(limit=5)
tuple 8959
function 2442
wrapper_descriptor 1048
dict 953
builtin_function_or_method 800
.. versionadded:: 1.1
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 3.0
New parameter: ``file``.
.. versionchanged:: 3.1.3
New parameter: ``filter``.
"""
if file is None:
file = sys.stdout
stats = most_common_types(limit, objects, shortnames=shortnames,
filter=filter)
width = max(len(name) for name, count in stats)
for name, count in stats:
file.write('%-*s %i\n' % (width, name, count))
def growth(limit=10, peak_stats={}, shortnames=True, filter=None):
"""Count the increase in peak object since last call.
Returns a list of (type_name, total_count, increase_delta),
descending order by increase_delta.
Limits the output to ``limit`` largest deltas. You may set ``limit`` to
None to see all of them.
Uses and updates ``peak_stats``, a dictionary from type names to previously
seen peak object counts. Usually you don't need to pay attention to this
argument.
If ``filter`` is specified, it should be a function taking one argument and
returning a boolean. Objects for which ``filter(obj)`` returns ``False``
will be ignored.
The caveats documented in :func:`typestats` apply.
Example:
>>> growth(2)
[(tuple, 12282, 10), (dict, 1922, 7)]
.. versionadded:: 3.3.0
"""
gc.collect()
stats = typestats(shortnames=shortnames, filter=filter)
deltas = {}
for name, count in iteritems(stats):
old_count = peak_stats.get(name, 0)
if count > old_count:
deltas[name] = count - old_count
peak_stats[name] = count
deltas = sorted(deltas.items(), key=operator.itemgetter(1),
reverse=True)
if limit:
deltas = deltas[:limit]
return [(name, stats[name], delta) for name, delta in deltas]
def show_growth(limit=10, peak_stats=None, shortnames=True, file=None,
filter=None):
"""Show the increase in peak object counts since last call.
    If ``peak_stats`` is None, peak object counts are recorded by
    :func:`growth`; you can keep track of the counts yourself by passing
    your own dictionary as ``peak_stats``.
The caveats documented in :func:`growth` apply.
Example:
>>> show_growth()
wrapper_descriptor 970 +14
tuple 12282 +10
dict 1922 +7
...
.. versionadded:: 1.5
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 2.1
New parameter: ``file``.
.. versionchanged:: 3.1.3
New parameter: ``filter``.
"""
if peak_stats is None:
result = growth(limit, shortnames=shortnames, filter=filter)
else:
result = growth(limit, peak_stats, shortnames, filter)
if result:
if file is None:
file = sys.stdout
width = max(len(name) for name, _, _ in result)
for name, count, delta in result:
file.write('%-*s%9d %+9d\n' % (width, name, count, delta))
def get_new_ids(skip_update=False, limit=10, sortby='deltas',
shortnames=None, file=None, _state={}):
"""Find and display new objects allocated since last call.
Shows the increase in object counts since last call to this
function and returns the memory address ids for new objects.
Returns a dictionary mapping object type names to sets of object IDs
that have been created since the last time this function was called.
``skip_update`` (bool): If True, returns the same dictionary that
was returned during the previous call without updating the internal
state or examining the objects currently in memory.
``limit`` (int): The maximum number of rows that you want to print
data for. Use 0 to suppress the printing. Use None to print everything.
``sortby`` (str): This is the column that you want to sort by in
descending order. Possible values are: 'old', 'current', 'new',
'deltas'
``shortnames`` (bool): If True, classes with the same name but
defined in different modules will be lumped together. If False,
all type names will be qualified with the module name. If None (default),
``get_new_ids`` will remember the value from previous calls, so it's
enough to prime this once. By default the primed value is True.
``_state`` (dict): Stores old, current, and new_ids in memory.
It is used by the function to store the internal state between calls.
Never pass in this argument unless you know what you're doing.
The caveats documented in :func:`growth` apply.
When one gets new_ids from :func:`get_new_ids`, one can use
:func:`at_addrs` to get a list of those objects. Then one can iterate over
the new objects, print out what they are, and call :func:`show_backrefs` or
:func:`show_chain` to see where they are referenced.
Example:
>>> _ = get_new_ids() # store current objects in _state
>>> _ = get_new_ids() # current_ids become old_ids in _state
>>> a = [0, 1, 2] # list we don't know about
>>> b = [3, 4, 5] # list we don't know about
>>> new_ids = get_new_ids(limit=3) # we see new lists
======================================================================
Type Old_ids Current_ids New_ids Count_Deltas
======================================================================
list 324 326 +3 +2
dict 1125 1125 +0 +0
wrapper_descriptor 1001 1001 +0 +0
======================================================================
>>> new_lists = at_addrs(new_ids['list'])
>>> a in new_lists
True
>>> b in new_lists
True
.. versionadded:: 3.4
"""
if not _state:
_state['old'] = collections.defaultdict(set)
_state['current'] = collections.defaultdict(set)
_state['new'] = collections.defaultdict(set)
_state['shortnames'] = True
new_ids = _state['new']
if skip_update:
return new_ids
old_ids = _state['old']
current_ids = _state['current']
if shortnames is None:
shortnames = _state['shortnames']
else:
_state['shortnames'] = shortnames
gc.collect()
objects = gc.get_objects()
for class_name in old_ids:
old_ids[class_name].clear()
for class_name, ids_set in current_ids.items():
old_ids[class_name].update(ids_set)
for class_name in current_ids:
current_ids[class_name].clear()
for o in objects:
if shortnames:
class_name = _short_typename(o)
else:
class_name = _long_typename(o)
id_number = id(o)
current_ids[class_name].add(id_number)
for class_name in new_ids:
new_ids[class_name].clear()
rows = []
keys_to_remove = []
for class_name in current_ids:
num_old = len(old_ids[class_name])
num_current = len(current_ids[class_name])
if num_old == 0 and num_current == 0:
# remove the key from our dicts if we don't have any old or
# current class_name objects
keys_to_remove.append(class_name)
continue
new_ids_set = current_ids[class_name] - old_ids[class_name]
new_ids[class_name].update(new_ids_set)
num_new = len(new_ids_set)
num_delta = num_current - num_old
row = (class_name, num_old, num_current, num_new, num_delta)
rows.append(row)
for key in keys_to_remove:
del old_ids[key]
del current_ids[key]
del new_ids[key]
index_by_sortby = {'old': 1, 'current': 2, 'new': 3, 'deltas': 4}
rows.sort(key=operator.itemgetter(index_by_sortby[sortby], 0),
reverse=True)
if limit is not None:
rows = rows[:limit]
if not rows:
return new_ids
if file is None:
file = sys.stdout
width = max(len(row[0]) for row in rows)
print('='*(width+13*4), file=file)
print('%-*s%13s%13s%13s%13s' %
(width, 'Type', 'Old_ids', 'Current_ids', 'New_ids', 'Count_Deltas'),
file=file)
print('='*(width+13*4), file=file)
for row_class, old, current, new, delta in rows:
print('%-*s%13d%13d%+13d%+13d' %
(width, row_class, old, current, new, delta), file=file)
print('='*(width+13*4), file=file)
return new_ids
def get_leaking_objects(objects=None):
"""Return objects that do not have any referents.
These could indicate reference-counting bugs in C code. Or they could
be legitimate.
Note that the GC does not track simple objects like int or str.
.. versionadded:: 1.7
"""
if objects is None:
gc.collect()
objects = gc.get_objects()
try:
ids = set(id(i) for i in objects)
for i in objects:
ids.difference_update(id(j) for j in gc.get_referents(i))
# this then is our set of objects without referrers
return [i for i in objects if id(i) in ids]
finally:
del objects, i # clear cyclic references to frame
def by_type(typename, objects=None):
"""Return objects tracked by the garbage collector with a given class name.
Example:
>>> by_type('MyClass')
[<mymodule.MyClass object at 0x...>]
Note that the GC does not track simple objects like int or str.
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
Accepts fully-qualified type names (i.e. 'package.module.ClassName')
as well as short type names (i.e. 'ClassName').
"""
if objects is None:
objects = gc.get_objects()
try:
if '.' in typename:
return [o for o in objects if _long_typename(o) == typename]
else:
return [o for o in objects if _short_typename(o) == typename]
finally:
del objects # clear cyclic references to frame
def at(addr):
"""Return an object at a given memory address.
The reverse of id(obj):
>>> at(id(obj)) is obj
True
Note that this function does not work on objects that are not tracked by
the GC (e.g. ints or strings).
"""
for o in gc.get_objects():
if id(o) == addr:
return o
return None
def at_addrs(address_set):
"""Return a list of objects for a given set of memory addresses.
The reverse of [id(obj1), id(obj2), ...]. Note that objects are returned
in an arbitrary order.
When one gets ``new_ids`` from :func:`get_new_ids`, one can use this
function to get a list of those objects. Then one can iterate over the new
objects, print out what they are, and call :func:`show_backrefs` or
:func:`show_chain` to see where they are referenced.
>>> a = [0, 1, 2]
>>> new_ids = get_new_ids()
>>> new_lists = at_addrs(new_ids['list'])
>>> a in new_lists
True
Note that this function does not work on objects that are not tracked
by the GC (e.g. ints or strings).
.. versionadded:: 3.4
"""
res = []
for o in gc.get_objects():
if id(o) in address_set:
res.append(o)
return res
def find_ref_chain(obj, predicate, max_depth=20, extra_ignore=()):
"""Find a shortest chain of references leading from obj.
The end of the chain will be some object that matches your predicate.
``predicate`` is a function taking one argument and returning a boolean.
``max_depth`` limits the search depth.
``extra_ignore`` can be a list of object IDs to exclude those objects from
your search.
Example:
>>> find_ref_chain(obj, lambda x: isinstance(x, MyClass))
[obj, ..., <MyClass object at ...>]
Returns ``[obj]`` if such a chain could not be found.
.. versionadded:: 1.7
"""
return _find_chain(obj, predicate, gc.get_referents,
max_depth=max_depth, extra_ignore=extra_ignore)[::-1]
def find_backref_chain(obj, predicate, max_depth=20, extra_ignore=()):
"""Find a shortest chain of references leading to obj.
The start of the chain will be some object that matches your predicate.
``predicate`` is a function taking one argument and returning a boolean.
``max_depth`` limits the search depth.
``extra_ignore`` can be a list of object IDs to exclude those objects from
your search.
Example:
>>> find_backref_chain(obj, is_proper_module)
[<module ...>, ..., obj]
Returns ``[obj]`` if such a chain could not be found.
.. versionchanged:: 1.5
Returns ``obj`` instead of ``None`` when a chain could not be found.
"""
return _find_chain(obj, predicate, gc.get_referrers,
max_depth=max_depth, extra_ignore=extra_ignore)
def show_backrefs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
highlight=None, filename=None, extra_info=None,
refcounts=False, shortnames=True, output=None,
extra_node_attrs=None):
"""Generate an object reference graph ending at ``objs``.
The graph will show you what objects refer to ``objs``, directly and
indirectly.
``objs`` can be a single object, or it can be a list of objects. If
unsure, wrap the single object in a new list.
``filename`` if specified, can be the name of a .dot or a image
file, whose extension indicates the desired output format; note
that output to a specific format is entirely handled by GraphViz:
if the desired format is not supported, you just get the .dot
file. If ``filename`` and ``output`` are not specified, ``show_backrefs``
will try to display the graph inline (if you're using IPython), otherwise
it'll try to produce a .dot file and spawn a viewer (xdot). If xdot is
not available, ``show_backrefs`` will convert the .dot file to a
.png and print its name.
``output`` if specified, the GraphViz output will be written to this
file object. ``output`` and ``filename`` should not both be specified.
Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the
graph.
Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to
remove undesired objects from the graph.
Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.
Use ``extra_info`` (a function taking one argument and returning a
string) to report extra information for objects.
Use ``extra_node_attrs`` (a function taking the current object as argument,
returning a dict of strings) to add extra attributes to the nodes. See
https://www.graphviz.org/doc/info/attrs.html for a list of possible node
attributes.
Specify ``refcounts=True`` if you want to see reference counts.
These will mostly match the number of arrows pointing to an object,
but can be different for various reasons.
Specify ``shortnames=False`` if you want to see fully-qualified type
names ('package.module.ClassName'). By default you get to see only the
class name part.
Examples:
>>> show_backrefs(obj)
>>> show_backrefs([obj1, obj2])
>>> show_backrefs(obj, max_depth=5)
>>> show_backrefs(obj, filter=lambda x: not inspect.isclass(x))
>>> show_backrefs(obj, highlight=inspect.isclass)
>>> show_backrefs(obj, extra_ignore=[id(locals())])
>>> show_backrefs(obj, extra_node_attrs=lambda x: dict(URL=str(id(x))))
.. versionchanged:: 1.3
New parameters: ``filename``, ``extra_info``.
.. versionchanged:: 1.5
New parameter: ``refcounts``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 2.0
New parameter: ``output``.
.. versionchanged:: 3.5
New parameter: ``extra_node_attrs``.
"""
# For show_backrefs(), it makes sense to stop when reaching a
# module because you'll end up in sys.modules and explode the
# graph with useless clutter. That's why we're specifying
# cull_func here, but not in show_graph().
return _show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
filter=filter, too_many=too_many, highlight=highlight,
edge_func=gc.get_referrers, swap_source_target=False,
filename=filename, output=output, extra_info=extra_info,
refcounts=refcounts, shortnames=shortnames,
cull_func=is_proper_module,
extra_node_attrs=extra_node_attrs)
def show_refs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
highlight=None, filename=None, extra_info=None,
refcounts=False, shortnames=True, output=None,
extra_node_attrs=None):
"""Generate an object reference graph starting at ``objs``.
The graph will show you what objects are reachable from ``objs``, directly
and indirectly.
``objs`` can be a single object, or it can be a list of objects. If
unsure, wrap the single object in a new list.
``filename`` if specified, can be the name of a .dot or a image
file, whose extension indicates the desired output format; note
that output to a specific format is entirely handled by GraphViz:
if the desired format is not supported, you just get the .dot
file. If ``filename`` and ``output`` is not specified, ``show_refs`` will
try to display the graph inline (if you're using IPython), otherwise it'll
try to produce a .dot file and spawn a viewer (xdot). If xdot is
not available, ``show_refs`` will convert the .dot file to a
.png and print its name.
``output`` if specified, the GraphViz output will be written to this
file object. ``output`` and ``filename`` should not both be specified.
Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the
graph.
Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to
remove undesired objects from the graph.
Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.
Use ``extra_info`` (a function returning a string) to report extra
information for objects.
Use ``extra_node_attrs`` (a function taking the current object as argument,
returning a dict of strings) to add extra attributes to the nodes. See
https://www.graphviz.org/doc/info/attrs.html for a list of possible node
attributes.
Specify ``refcounts=True`` if you want to see reference counts.
Examples:
>>> show_refs(obj)
>>> show_refs([obj1, obj2])
>>> show_refs(obj, max_depth=5)
>>> show_refs(obj, filter=lambda x: not inspect.isclass(x))
>>> show_refs(obj, highlight=inspect.isclass)
>>> show_refs(obj, extra_ignore=[id(locals())])
>>> show_refs(obj, extra_node_attrs=lambda x: dict(URL=str(id(x))))
.. versionadded:: 1.1
.. versionchanged:: 1.3
New parameters: ``filename``, ``extra_info``.
.. versionchanged:: 1.5
Follows references from module objects instead of stopping.
New parameter: ``refcounts``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 2.0
New parameter: ``output``.
.. versionchanged:: 3.5
New parameter: ``extra_node_attrs``.
"""
return _show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
filter=filter, too_many=too_many, highlight=highlight,
edge_func=gc.get_referents, swap_source_target=True,
filename=filename, extra_info=extra_info,
refcounts=refcounts, shortnames=shortnames,
output=output, extra_node_attrs=extra_node_attrs)
def show_chain(*chains, **kw):
"""Show a chain (or several chains) of object references.
Useful in combination with :func:`find_ref_chain` or
:func:`find_backref_chain`, e.g.
>>> show_chain(find_backref_chain(obj, is_proper_module))
You can specify if you want that chain traced backwards or forwards
by passing a ``backrefs`` keyword argument, e.g.
>>> show_chain(find_ref_chain(obj, is_proper_module),
... backrefs=False)
Ideally this shouldn't matter, but for some objects
:func:`gc.get_referrers` and :func:`gc.get_referents` are not perfectly
symmetrical.
You can specify ``highlight``, ``extra_info``, ``refcounts``,
``shortnames``, ``filename`` or ``output`` arguments like for
:func:`show_backrefs` or :func:`show_refs`.
.. versionadded:: 1.5
.. versionchanged:: 1.7
New parameter: ``backrefs``.
.. versionchanged:: 2.0
New parameter: ``output``.
"""
backrefs = kw.pop('backrefs', True)
chains = [chain for chain in chains if chain] # remove empty ones
def in_chains(x, ids=set(map(id, itertools.chain(*chains)))):
return id(x) in ids
max_depth = max(map(len, chains)) - 1
if backrefs:
show_backrefs([chain[-1] for chain in chains], max_depth=max_depth,
filter=in_chains, **kw)
else:
show_refs([chain[0] for chain in chains], max_depth=max_depth,
filter=in_chains, **kw)
def is_proper_module(obj):
"""
Returns ``True`` if ``obj`` can be treated like a garbage collector root.
That is, if ``obj`` is a module that is in ``sys.modules``.
>>> import types
>>> is_proper_module([])
False
>>> is_proper_module(types)
True
>>> is_proper_module(types.ModuleType('foo'))
False
.. versionadded:: 1.8
"""
return (
inspect.ismodule(obj)
and obj is sys.modules.get(getattr(obj, '__name__', None))
)
#
# Internal helpers
#
def _find_chain(obj, predicate, edge_func, max_depth=20, extra_ignore=()):
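    # Breadth-first search over edge_func: walk outward from obj, recording
    # each visited object's parent, and stop at the first object for which
    # `predicate` is true; the chain is then rebuilt by following parents
    # back to obj.  The `ignore` set holds the ids of this function's own
    # bookkeeping objects (and the calling frames) so the search does not
    # follow references that the search itself introduced.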
queue = [obj]
depth = {id(obj): 0}
parent = {id(obj): None}
ignore = set(extra_ignore)
ignore.add(id(extra_ignore))
ignore.add(id(queue))
ignore.add(id(depth))
ignore.add(id(parent))
ignore.add(id(ignore))
ignore.add(id(sys._getframe())) # this function
ignore.add(id(sys._getframe(1))) # find_chain/find_backref_chain
gc.collect()
while queue:
target = queue.pop(0)
if predicate(target):
chain = [target]
while parent[id(target)] is not None:
target = parent[id(target)]
chain.append(target)
return chain
tdepth = depth[id(target)]
if tdepth < max_depth:
referrers = edge_func(target)
ignore.add(id(referrers))
for source in referrers:
if id(source) in ignore:
continue
if id(source) not in depth:
depth[id(source)] = tdepth + 1
parent[id(source)] = target
queue.append(source)
return [obj] # not found
def _show_graph(objs, edge_func, swap_source_target,
max_depth=3, extra_ignore=(), filter=None, too_many=10,
highlight=None, filename=None, extra_info=None,
refcounts=False, shortnames=True, output=None,
cull_func=None, extra_node_attrs=None):
if not _isinstance(objs, (list, tuple)):
objs = [objs]
is_interactive = False
if filename and output:
raise ValueError('Cannot specify both output and filename.')
elif output:
f = output
elif filename and filename.endswith('.dot'):
f = codecs.open(filename, 'w', encoding='utf-8')
dot_filename = filename
elif IS_INTERACTIVE and not filename:
is_interactive = True
f = StringIO()
else:
fd, dot_filename = tempfile.mkstemp(prefix='objgraph-',
suffix='.dot', text=True)
f = os.fdopen(fd, "w")
if getattr(f, 'encoding', None): # pragma: PY3
# Python 3 will wrap the file in the user's preferred encoding
# Re-wrap it for utf-8
import io
f = io.TextIOWrapper(f.detach(), 'utf-8')
f.write('digraph ObjectGraph {\n'
' node[shape=box, style=filled, fillcolor=white];\n')
queue = []
depth = {}
ignore = set(extra_ignore)
ignore.add(id(objs))
ignore.add(id(extra_ignore))
ignore.add(id(queue))
ignore.add(id(depth))
ignore.add(id(ignore))
ignore.add(id(sys._getframe())) # this function
ignore.add(id(sys._getframe().f_locals))
ignore.add(id(sys._getframe(1))) # show_refs/show_backrefs
ignore.add(id(sys._getframe(1).f_locals))
for obj in objs:
f.write(' %s[fontcolor=red];\n' % (_obj_node_id(obj)))
depth[id(obj)] = 0
queue.append(obj)
del obj
gc.collect()
nodes = 0
while queue:
nodes += 1
# The names "source" and "target" are reversed here because
# originally there was just show_backrefs() and we were
# traversing the reference graph backwards.
target = queue.pop(0)
tdepth = depth[id(target)]
f.write(' %s[label="%s"%s];\n' % (_obj_node_id(target),
_obj_label(target, extra_info,
refcounts, shortnames),
_obj_attrs(target,
extra_node_attrs)))
h, s, v = _gradient((0, 0, 1), (0, 0, .3), tdepth, max_depth)
if inspect.ismodule(target):
h = .3
s = 1
if highlight and highlight(target):
h = .6
s = .6
v = 0.5 + v * 0.5
f.write(' %s[fillcolor="%g,%g,%g"];\n'
% (_obj_node_id(target), h, s, v))
if v < 0.5:
f.write(' %s[fontcolor=white];\n' % (_obj_node_id(target)))
if hasattr(getattr(target, '__class__', None), '__del__'):
f.write(' %s->%s_has_a_del[color=red,style=dotted,'
'len=0.25,weight=10];\n' % (_obj_node_id(target),
_obj_node_id(target)))
f.write(' %s_has_a_del[label="__del__",shape=doublecircle,'
'height=0.25,color=red,fillcolor="0,.5,1",fontsize=6];\n'
% (_obj_node_id(target)))
if tdepth >= max_depth:
continue
if cull_func is not None and cull_func(target):
continue
neighbours = edge_func(target)
ignore.add(id(neighbours))
n = 0
skipped = 0
for source in neighbours:
if id(source) in ignore:
continue
if filter and not filter(source):
continue
if n >= too_many:
skipped += 1
continue
if swap_source_target:
srcnode, tgtnode = target, source
else:
srcnode, tgtnode = source, target
elabel = _edge_label(srcnode, tgtnode, shortnames)
f.write(' %s -> %s%s;\n' % (_obj_node_id(srcnode),
_obj_node_id(tgtnode), elabel))
if id(source) not in depth:
depth[id(source)] = tdepth + 1
queue.append(source)
n += 1
del source
del neighbours
if skipped > 0:
h, s, v = _gradient((0, 1, 1), (0, 1, .3), tdepth + 1, max_depth)
if swap_source_target:
label = "%d more references" % skipped
edge = "%s->too_many_%s" % (_obj_node_id(target),
_obj_node_id(target))
else:
label = "%d more backreferences" % skipped
edge = "too_many_%s->%s" % (_obj_node_id(target),
_obj_node_id(target))
f.write(' %s[color=red,style=dotted,len=0.25,weight=10];\n'
% edge)
f.write(' too_many_%s[label="%s",shape=box,height=0.25,'
'color=red,fillcolor="%g,%g,%g",fontsize=6];\n'
% (_obj_node_id(target), label, h, s, v))
f.write(' too_many_%s[fontcolor=white];\n'
% (_obj_node_id(target)))
f.write("}\n")
if output:
return
if is_interactive:
return graphviz.Source(f.getvalue())
else:
# The file should only be closed if this function was in charge of
# opening the file.
f.close()
print("Graph written to %s (%d nodes)" % (dot_filename, nodes))
_present_graph(dot_filename, filename)
def _present_graph(dot_filename, filename=None):
"""Present a .dot file to the user in the requested fashion.
If ``filename`` is provided, runs ``dot`` to convert the .dot file
into the desired format, determined by the filename extension.
If ``filename`` is not provided, tries to launch ``xdot``, a
graphical .dot file viewer. If ``xdot`` is not present on the system,
converts the graph to a PNG.
"""
if filename == dot_filename:
# nothing to do, the user asked for a .dot file and got it
return
if not filename and _program_in_path('xdot'):
print("Spawning graph viewer (xdot)")
subprocess.Popen(['xdot', dot_filename], close_fds=True)
elif _program_in_path('dot'):
if not filename:
print("Graph viewer (xdot) not found, generating a png instead")
filename = dot_filename[:-4] + '.png'
stem, ext = os.path.splitext(filename)
cmd = ['dot', '-T' + ext[1:], '-o' + filename, dot_filename]
dot = subprocess.Popen(cmd, close_fds=False)
dot.wait()
if dot.returncode != 0:
# XXX: shouldn't this go to stderr or a log?
print('dot failed (exit code %d) while executing "%s"'
% (dot.returncode, ' '.join(cmd)))
else:
print("Image generated as %s" % filename)
else:
if not filename:
print("Graph viewer (xdot) and image renderer (dot) not found,"
" not doing anything else")
else:
print("Image renderer (dot) not found, not doing anything else")
def _obj_node_id(obj):
return ('o%d' % id(obj)).replace('-', '_')
def _obj_attrs(obj, extra_node_attrs):
if extra_node_attrs is not None:
attrs = extra_node_attrs(obj)
return ", " + ", ".join('%s="%s"' % (name, _quote(value))
for name, value in sorted(iteritems(attrs))
if value is not None)
else:
return ""
def _obj_label(obj, extra_info=None, refcounts=False, shortnames=True):
if shortnames:
label = [_short_typename(obj)]
else:
label = [_long_typename(obj)]
if refcounts:
label[0] += ' [%d]' % (sys.getrefcount(obj) - 4)
# Why -4? To ignore the references coming from
# obj_label's frame (obj)
# show_graph's frame (target variable)
# sys.getrefcount()'s argument
# something else that doesn't show up in gc.get_referrers()
label.append(_safe_repr(obj))
if extra_info:
label.append(str(extra_info(obj)))
return _quote('\n'.join(label))
def _quote(s):
return (s.replace("\\", "\\\\")
.replace("\"", "\\\"")
.replace("\n", "\\n")
.replace("\0", "\\\\0"))
def _get_obj_type(obj):
objtype = type(obj)
if type(obj) == InstanceType: # pragma: PY2 -- no old-style classes on PY3
objtype = obj.__class__
return objtype
def _short_typename(obj):
return _get_obj_type(obj).__name__
def _long_typename(obj):
objtype = _get_obj_type(obj)
name = objtype.__name__
module = getattr(objtype, '__module__', None)
if module:
return '%s.%s' % (module, name)
else:
return name
def _safe_repr(obj):
try:
return _short_repr(obj)
except Exception:
return '(unrepresentable)'
def _name_or_repr(value):
try:
result = value.__name__
except AttributeError:
result = repr(value)[:40]
if _isinstance(result, basestring):
return result
else:
return repr(value)[:40]
def _short_repr(obj):
if _isinstance(obj, (type, types.ModuleType, types.BuiltinMethodType,
types.BuiltinFunctionType)):
return _name_or_repr(obj)
if _isinstance(obj, types.MethodType):
name = _name_or_repr(obj.__func__)
if obj.__self__:
return name + ' (bound)'
else: # pragma: PY2 -- no unbound methods on Python 3
return name
# NB: types.LambdaType is an alias for types.FunctionType!
if _isinstance(obj, types.LambdaType) and obj.__name__ == '<lambda>':
return 'lambda: %s:%s' % (os.path.basename(obj.__code__.co_filename),
obj.__code__.co_firstlineno)
if _isinstance(obj, types.FrameType):
return '%s:%s' % (obj.f_code.co_filename, obj.f_lineno)
if _isinstance(obj, (tuple, list, dict, set)):
return '%d items' % len(obj)
return repr(obj)[:40]
def _gradient(start_color, end_color, depth, max_depth):
if max_depth == 0:
# avoid division by zero
return start_color
h1, s1, v1 = start_color
h2, s2, v2 = end_color
f = float(depth) / max_depth
h = h1 * (1-f) + h2 * f
s = s1 * (1-f) + s2 * f
v = v1 * (1-f) + v2 * f
return h, s, v
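# Worked example (illustrative): _gradient((0, 0, 1), (0, 0, .3), 1, 3)
# interpolates one third of the way from the start colour to the end colour,
# returning approximately (0, 0, 0.767) in HSV space.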
def _edge_label(source, target, shortnames=True):
if (_isinstance(target, dict)
and target is getattr(source, '__dict__', None)):
return ' [label="__dict__",weight=10]'
if _isinstance(source, types.FrameType):
if target is source.f_locals:
return ' [label="f_locals",weight=10]'
if target is source.f_globals:
return ' [label="f_globals",weight=10]'
if _isinstance(source, types.MethodType):
try:
if target is source.__self__:
return ' [label="__self__",weight=10]'
if target is source.__func__:
return ' [label="__func__",weight=10]'
except AttributeError: # pragma: nocover
# Python < 2.6 compatibility
if target is source.im_self:
return ' [label="im_self",weight=10]'
if target is source.im_func:
return ' [label="im_func",weight=10]'
if _isinstance(source, types.FunctionType):
for k in dir(source):
if target is getattr(source, k):
return ' [label="%s",weight=10]' % _quote(k)
if _isinstance(source, dict):
for k, v in iteritems(source):
if v is target:
if _isinstance(k, basestring) and _is_identifier(k):
return ' [label="%s",weight=2]' % _quote(k)
else:
if shortnames:
tn = _short_typename(k)
else:
tn = _long_typename(k)
return ' [label="%s"]' % _quote(tn + "\n" + _safe_repr(k))
return ''
_is_identifier = re.compile('[a-zA-Z_][a-zA-Z_0-9]*$').match
def _program_in_path(program):
# XXX: Consider using distutils.spawn.find_executable or shutil.which
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
path = [os.path.join(dir, program) for dir in path]
path = [True for file in path
if os.path.isfile(file) or os.path.isfile(file + '.exe')]
return bool(path)
| mit | 6,699,779,760,792,617,000 | 33.575854 | 79 | 0.588202 | false |
t794104/ansible | lib/ansible/plugins/inventory/gcp_compute.py | 1 | 19487 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: gcp_compute
plugin_type: inventory
short_description: Google Cloud Compute Engine inventory source
requirements:
- requests >= 2.18.4
- google-auth >= 1.3.0
extends_documentation_fragment:
- constructed
- inventory_cache
description:
- Get inventory hosts from Google Cloud Platform GCE.
- Uses a YAML configuration file that ends with gcp_compute.(yml|yaml) or gcp.(yml|yaml).
options:
plugin:
description: token that ensures this is a source file for the 'gcp_compute' plugin.
required: True
choices: ['gcp_compute']
zones:
          description: A list of zones in which to describe GCE instances.
If none provided, it defaults to all zones available to a given project.
type: list
projects:
description: A list of projects in which to describe GCE instances.
type: list
required: True
filters:
description: >
A list of filter value pairs. Available filters are listed here
U(https://cloud.google.com/compute/docs/reference/rest/v1/instances/aggregatedList).
            Each additional filter in the list will be added as an AND condition
(filter1 and filter2)
type: list
hostnames:
description: A list of options that describe the ordering for which
hostnames should be assigned. Currently supported hostnames are
'public_ip', 'private_ip', or 'name'.
default: ['public_ip', 'private_ip', 'name']
type: list
auth_kind:
description:
- The type of credential used.
required: True
choices: ['application', 'serviceaccount', 'machineaccount']
env:
- name: GCP_AUTH_KIND
version_added: "2.8"
scopes:
description: list of authentication scopes
type: list
default: ['https://www.googleapis.com/auth/compute']
env:
- name: GCP_SCOPES
version_added: "2.8"
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
env:
- name: GCP_SERVICE_ACCOUNT_FILE
version_added: "2.8"
- name: GCE_CREDENTIALS_FILE_PATH
version_added: "2.8"
service_account_email:
description:
- An optional service account email address if machineaccount is selected
and the user does not wish to use the default email.
env:
- name: GCP_SERVICE_ACCOUNT_EMAIL
version_added: "2.8"
vars_prefix:
description: prefix to apply to host variables, does not include facts nor params
default: ''
use_contrib_script_compatible_sanitization:
description:
- By default this plugin is using a general group name sanitization to create safe and usable group names for use in Ansible.
This option allows you to override that, in efforts to allow migration from the old inventory script.
- For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
otherwise the core engine will just use the standard sanitization on top.
- This is not the default as such names break certain functionality as not all characters are valid Python identifiers
which group names end up being used as.
type: bool
default: False
version_added: '2.8'
retrieve_image_info:
description:
- Populate the C(image) host fact for the instances returned with the GCP image name
- By default this plugin does not attempt to resolve the boot image of an instance to the image name cataloged in GCP
because of the performance overhead of the task.
- Unless this option is enabled, the C(image) host variable will be C(null)
type: bool
default: False
version_added: '2.8'
'''
EXAMPLES = '''
plugin: gcp_compute
zones: # populate inventory with instances in these regions
- us-east1-a
projects:
- gcp-prod-gke-100
- gcp-cicd-101
filters:
- machineType = n1-standard-1
- scheduling.automaticRestart = true AND machineType = n1-standard-1
service_account_file: /tmp/service_account.json
auth_kind: serviceaccount
scopes:
- 'https://www.googleapis.com/auth/cloud-platform'
- 'https://www.googleapis.com/auth/compute.readonly'
keyed_groups:
# Create groups from GCE labels
- prefix: gcp
key: labels
hostnames:
# List host by name instead of the default public ip
- name
compose:
# Set an inventory parameter to use the Public IP address to connect to the host
# For Private ip use "networkInterfaces[0].networkIP"
ansible_host: networkInterfaces[0].accessConfigs[0].natIP
'''
import json
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.gcp_utils import GcpSession, navigate_hash, GcpRequestException, HAS_GOOGLE_LIBRARIES
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
# Mocking a module to reuse module_utils
class GcpMockModule(object):
def __init__(self, params):
self.params = params
def fail_json(self, *args, **kwargs):
raise AnsibleError(kwargs['msg'])
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = 'gcp_compute'
_instances = r"https://www.googleapis.com/compute/v1/projects/%s/aggregated/instances"
def __init__(self):
super(InventoryModule, self).__init__()
self.group_prefix = 'gcp_'
def _populate_host(self, item):
'''
:param item: A GCP instance
'''
hostname = self._get_hostname(item)
self.inventory.add_host(hostname)
for key in item:
try:
self.inventory.set_variable(hostname, self.get_option('vars_prefix') + key, item[key])
except (ValueError, TypeError) as e:
self.display.warning("Could not set host info hostvar for %s, skipping %s: %s" % (hostname, key, to_text(e)))
self.inventory.add_child('all', hostname)
def verify_file(self, path):
'''
:param path: the path to the inventory config file
:return the contents of the config file
'''
if super(InventoryModule, self).verify_file(path):
if path.endswith(('gcp.yml', 'gcp.yaml')):
return True
elif path.endswith(('gcp_compute.yml', 'gcp_compute.yaml')):
return True
return False
def fetch_list(self, params, link, query):
'''
:param params: a dict containing all of the fields relevant to build URL
:param link: a formatted URL
:param query: a formatted query string
:return the JSON response containing a list of instances.
'''
response = self.auth_session.get(link, params={'filter': query})
return self._return_if_object(self.fake_module, response)
def _get_query_options(self, filters):
'''
:param config_data: contents of the inventory config file
:return A fully built query string
'''
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
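    # Illustrative behaviour of the query builder above: given the filters
    # from the EXAMPLES block, e.g.
    #   ['machineType = n1-standard-1',
    #    'scheduling.automaticRestart = true AND machineType = n1-standard-1']
    # it returns
    #   '(machineType = n1-standard-1) (scheduling.automaticRestart = true AND machineType = n1-standard-1)'
    # i.e. each filter is parenthesised and the filters are joined with spaces.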
def _return_if_object(self, module, response):
'''
:param module: A GcpModule
:param response: A Requests response object
:return JSON response
'''
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
response.raise_for_status
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
except GcpRequestException as inst:
module.fail_json(msg="Network error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
if result['kind'] != 'compute#instanceAggregatedList' and result['kind'] != 'compute#zoneList':
module.fail_json(msg="Incorrect result: {kind}".format(**result))
return result
def _format_items(self, items, project_disks):
'''
:param items: A list of hosts
'''
for host in items:
if 'zone' in host:
host['zone_selflink'] = host['zone']
host['zone'] = host['zone'].split('/')[-1]
if 'machineType' in host:
host['machineType_selflink'] = host['machineType']
host['machineType'] = host['machineType'].split('/')[-1]
if 'networkInterfaces' in host:
for network in host['networkInterfaces']:
if 'network' in network:
network['network'] = self._format_network_info(network['network'])
if 'subnetwork' in network:
network['subnetwork'] = self._format_network_info(network['subnetwork'])
host['project'] = host['selfLink'].split('/')[6]
host['image'] = self._get_image(host, project_disks)
return items
def _add_hosts(self, items, config_data, format_items=True, project_disks=None):
'''
:param items: A list of hosts
:param config_data: configuration data
:param format_items: format items or not
'''
if not items:
return
if format_items:
items = self._format_items(items, project_disks)
for host in items:
self._populate_host(host)
hostname = self._get_hostname(host)
self._set_composite_vars(self.get_option('compose'), host, hostname)
self._add_host_to_composed_groups(self.get_option('groups'), host, hostname)
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname)
def _format_network_info(self, address):
'''
:param address: A GCP network address
:return a dict with network shortname and region
'''
split = address.split('/')
region = ''
if 'global' in split:
region = 'global'
else:
region = split[8]
return {
'region': region,
'name': split[-1],
'selfLink': address
}
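    # Illustrative example, assuming the usual GCE selfLink layout: an address
    # such as
    # 'https://www.googleapis.com/compute/v1/projects/p/regions/us-east1/subnetworks/default'
    # yields {'region': 'us-east1', 'name': 'default', 'selfLink': <the URL>},
    # while selfLinks containing a 'global' path component get region='global'.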
def _get_hostname(self, item):
'''
:param item: A host response from GCP
:return the hostname of this instance
'''
hostname_ordering = ['public_ip', 'private_ip', 'name']
if self.get_option('hostnames'):
hostname_ordering = self.get_option('hostnames')
for order in hostname_ordering:
name = None
if order == 'public_ip':
name = self._get_publicip(item)
elif order == 'private_ip':
name = self._get_privateip(item)
elif order == 'name':
name = item[u'name']
else:
raise AnsibleParserError("%s is not a valid hostname precedent" % order)
if name:
return name
raise AnsibleParserError("No valid name found for host")
def _get_publicip(self, item):
'''
:param item: A host response from GCP
:return the publicIP of this instance or None
'''
# Get public IP if exists
for interface in item['networkInterfaces']:
if 'accessConfigs' in interface:
for accessConfig in interface['accessConfigs']:
if 'natIP' in accessConfig:
return accessConfig[u'natIP']
return None
def _get_image(self, instance, project_disks):
'''
:param instance: A instance response from GCP
:return the image of this instance or None
'''
image = None
if project_disks and 'disks' in instance:
for disk in instance['disks']:
if disk.get('boot'):
image = project_disks[disk["source"]]
return image
def _get_project_disks(self, config_data, query):
'''
project space disk images
'''
try:
self._project_disks
except AttributeError:
self._project_disks = {}
request_params = {'maxResults': 500, 'filter': query}
for project in config_data['projects']:
session_responses = []
page_token = True
while page_token:
response = self.auth_session.get(
'https://www.googleapis.com/compute/v1/projects/{0}/aggregated/disks'.format(project),
params=request_params
)
response_json = response.json()
if 'nextPageToken' in response_json:
request_params['pageToken'] = response_json['nextPageToken']
elif 'pageToken' in request_params:
del request_params['pageToken']
if 'items' in response_json:
session_responses.append(response_json)
page_token = 'pageToken' in request_params
for response in session_responses:
if 'items' in response:
# example k would be a zone or region name
# example v would be { "disks" : [], "otherkey" : "..." }
for zone_or_region, aggregate in response['items'].items():
if 'zones' in zone_or_region:
if 'disks' in aggregate:
zone = zone_or_region.replace('zones/', '')
for disk in aggregate['disks']:
if 'zones' in config_data and zone in config_data['zones']:
# If zones specified, only store those zones' data
if 'sourceImage' in disk:
self._project_disks[disk['selfLink']] = disk['sourceImage'].split('/')[-1]
else:
self._project_disks[disk['selfLink']] = disk['selfLink'].split('/')[-1]
else:
if 'sourceImage' in disk:
self._project_disks[disk['selfLink']] = disk['sourceImage'].split('/')[-1]
else:
self._project_disks[disk['selfLink']] = disk['selfLink'].split('/')[-1]
return self._project_disks
def _get_privateip(self, item):
'''
:param item: A host response from GCP
:return the privateIP of this instance or None
'''
# Fallback: Get private IP
for interface in item[u'networkInterfaces']:
if 'networkIP' in interface:
return interface[u'networkIP']
def parse(self, inventory, loader, path, cache=True):
if not HAS_GOOGLE_LIBRARIES:
raise AnsibleParserError('gce inventory plugin cannot start: %s' % missing_required_lib('google-auth'))
super(InventoryModule, self).parse(inventory, loader, path)
config_data = {}
config_data = self._read_config_data(path)
if self.get_option('use_contrib_script_compatible_sanitization'):
self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
# setup parameters as expected by 'fake module class' to reuse module_utils w/o changing the API
params = {
'filters': self.get_option('filters'),
'projects': self.get_option('projects'),
'scopes': self.get_option('scopes'),
'zones': self.get_option('zones'),
'auth_kind': self.get_option('auth_kind'),
'service_account_file': self.get_option('service_account_file'),
'service_account_email': self.get_option('service_account_email'),
}
self.fake_module = GcpMockModule(params)
self.auth_session = GcpSession(self.fake_module, 'compute')
query = self._get_query_options(params['filters'])
if self.get_option('retrieve_image_info'):
project_disks = self._get_project_disks(config_data, query)
else:
project_disks = None
# Cache logic
if cache:
cache = self.get_option('cache')
cache_key = self.get_cache_key(path)
else:
cache_key = None
cache_needs_update = False
if cache:
try:
results = self._cache[cache_key]
for project in results:
for zone in results[project]:
self._add_hosts(results[project][zone], config_data, False, project_disks=project_disks)
except KeyError:
cache_needs_update = True
if not cache or cache_needs_update:
cached_data = {}
for project in params['projects']:
cached_data[project] = {}
params['project'] = project
zones = params['zones']
# Fetch all instances
link = self._instances % project
resp = self.fetch_list(params, link, query)
for key, value in resp.get('items').items():
if 'instances' in value:
# Key is in format: "zones/europe-west1-b"
zone = key[6:]
if not zones or zone in zones:
self._add_hosts(value['instances'], config_data, project_disks=project_disks)
cached_data[project][zone] = value['instances']
if cache_needs_update:
self._cache[cache_key] = cached_data
@staticmethod
def _legacy_script_compatible_group_sanitization(name):
return name
| gpl-3.0 | 8,487,148,680,798,984,000 | 38.769388 | 137 | 0.556935 | false |
acg/lwpb | python/pbsplit.py | 1 | 1605 | #!/usr/bin/env python
'''
pbsplit - split a protobuf stream into multiple files
'''
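# Example invocation (hypothetical file names):
#   pbsplit.py -p schema.pb2 -m mypkg.MyMessage -z 1000 records.pb
# reads protobuf records from records.pb and writes them out in chunks of
# 1000 records per file, to records.pb.00000, records.pb.00001, ...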
import sys
import getopt
import lwpb
import lwpb.stream
import lwpb.codec
def shift(L): e = L[0] ; del L[0:1] ; return e
def main():
typename = ""
skip = 0
count = -1
splitsize = 1000 # in number of records
pb2file = None
infile = "-"
fin = sys.stdin
template = None
opts, args = getopt.getopt(sys.argv[1:], 'p:m:s:c:t:z:')
for o, a in opts:
if o == '-p':
pb2file = a
elif o == '-m':
typename = a
elif o == '-s':
skip = int(a)
elif o == '-c':
count = int(a)
elif o == '-t':
template = a
elif o == '-z':
splitsize = int(a)
if len(args):
infile = shift(args)
fin = file(infile)
if template == None:
template = infile+".%05u"
codec = lwpb.codec.MessageCodec(pb2file=pb2file, typename=typename)
reader = lwpb.stream.StreamReader(fin, codec=codec)
writer = None
fout = None
outfile = None
splitnum = 0
splitwritten = 0
written = 0
for record in reader:
if reader.current_number < skip:
continue
if count >= 0 and written >= count:
break
if fout == None:
outfile = template % splitnum
fout = file(outfile, 'w')
writer = lwpb.stream.StreamWriter(fout, codec=codec)
splitwritten = 0
writer.write_raw( reader.current_raw )
written += 1
splitwritten += 1
if splitwritten >= splitsize:
fout.close()
fout = None
splitnum += 1
if fout:
fout.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 1,052,393,894,243,850,500 | 16.637363 | 69 | 0.576947 | false |
bitmovin/bitmovin-python | examples/encoding/create_progressive_webm_encoding_with_vp9_and_opus_codecs.py | 1 | 4978 | import datetime
from bitmovin import Bitmovin, Encoding, HTTPSInput, S3Output, \
StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \
MuxingStream, CloudRegion, ProgressiveWebMMuxing, VP9CodecConfiguration, OpusCodecConfiguration, VP9Quality
from bitmovin.errors import BitmovinError
API_KEY = '<INSERT_YOUR_API_KEY>'
# https://<INSERT_YOUR_HTTP_HOST>/<INSERT_YOUR_HTTP_PATH>
HTTPS_INPUT_HOST = '<INSERT_YOUR_HTTPS_HOST>'
HTTPS_INPUT_PATH = '<INSERT_YOUR_HTTPS_PATH>'
S3_OUTPUT_ACCESSKEY = '<INSERT_YOUR_ACCESS_KEY>'
S3_OUTPUT_SECRETKEY = '<INSERT_YOUR_SECRET_KEY>'
S3_OUTPUT_BUCKETNAME = '<INSERT_YOUR_BUCKET_NAME>'
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = '/output/base/path/{}/'.format(date_component)
def main():
bitmovin = Bitmovin(api_key=API_KEY)
https_input = HTTPSInput(name='create_simple_encoding HTTPS input', host=HTTPS_INPUT_HOST)
https_input = bitmovin.inputs.HTTPS.create(https_input).resource
s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
secret_key=S3_OUTPUT_SECRETKEY,
bucket_name=S3_OUTPUT_BUCKETNAME,
name='Sample S3 Output')
s3_output = bitmovin.outputs.S3.create(s3_output).resource
encoding = Encoding(name='example webm encoding',
cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1,
encoder_version='BETA')
encoding = bitmovin.encodings.Encoding.create(encoding).resource
video_codec_configuration_1080p = VP9CodecConfiguration(name='example_video_codec_configuration_1080p',
bitrate=4800000,
rate=25.0,
width=1920,
height=1080,
tile_columns=2,
quality=VP9Quality.GOOD)
video_codec_configuration_1080p = bitmovin.codecConfigurations.VP9.create(video_codec_configuration_1080p).resource
audio_codec_configuration = OpusCodecConfiguration(name='example_audio_codec_configuration_english',
bitrate=128000,
rate=48000)
audio_codec_configuration = bitmovin.codecConfigurations.Opus.create(audio_codec_configuration).resource
video_input_stream = StreamInput(input_id=https_input.id,
input_path=HTTPS_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
audio_input_stream = StreamInput(input_id=https_input.id,
input_path=HTTPS_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
video_stream_1080p = Stream(codec_configuration_id=video_codec_configuration_1080p.id,
input_streams=[video_input_stream], name='Sample Stream 1080p')
video_stream_1080p = bitmovin.encodings.Stream.create(object_=video_stream_1080p,
encoding_id=encoding.id).resource
audio_stream = Stream(codec_configuration_id=audio_codec_configuration.id,
input_streams=[audio_input_stream], name='Sample Stream AUDIO')
audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream,
encoding_id=encoding.id).resource
audio_muxing_stream = MuxingStream(audio_stream.id)
video_muxing_stream_1080p = MuxingStream(video_stream_1080p.id)
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
webm_muxing_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH,
acl=[acl_entry])
webm_muxing = ProgressiveWebMMuxing(streams=[video_muxing_stream_1080p, audio_muxing_stream],
filename='myfile.webm',
outputs=[webm_muxing_output],
name='Sample WebM Muxing 1080p')
webm_muxing = bitmovin.encodings.Muxing.ProgressiveWebM.create(object_=webm_muxing,
encoding_id=encoding.id).resource
bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
try:
bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))
print("File successfully encoded")
if __name__ == '__main__':
main()
| unlicense | -3,270,803,099,738,486,000 | 48.78 | 119 | 0.57955 | false |
f3at/feat | src/feat/models/value.py | 1 | 25453 | # F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
from zope.interface import implements, classImplements
from feat.common import annotate, container
from feat.models import meta as models_meta, action
from feat.models.interface import IValueInfo, NotSupported, IValueOptions
from feat.models.interface import IValidator, IValueRange, ValueTypes
from feat.models.interface import IEncodingInfo, IModel, IReference
from feat.models.interface import IValueOption, IResponse, MissingParameters
from feat.models.interface import UnknownParameters, InvalidParameters
from feat.models.interface import IValueCollection, IValueList
from feat.interface.serialization import ISnapshotable
meta = models_meta.meta
def label(lable):
"""
Annotates the IValueInfo label.
@param lable: label of the IValueInfo being defined.
@type lable: str or unicode
"""
_annotate("label", lable)
def desc(desc):
"""
Annotates the IValueInfo description.
@param desc: description of the IValueInfo being defined.
@type desc: str or unicode
"""
_annotate("desc", desc)
def value_type(vtype):
"""
Annotates the IValueInfo value type.
@param vtype: type of the IValueInfo being defined.
@type vtype: ValueTypes
"""
_annotate("value_type", vtype)
def default(default):
"""
Annotates the IValueInfo default value,
will be validated at instance creation time.
@param default: default value of the IValueInfo being defined.
@type default: Any
"""
_annotate("default", default)
def option(value, is_default=False, label=None):
"""
Annotates a possible value for IValueOptions,
will be validated at instance creation time.
@param value: a possible value for the IValueOptions being defined.
@type value: Any
@param is_default: if the option should be the default value.
@type is_default: bool
@param label: option label or None; if none the string representation
of the value will be used as label.
@type label: str or unicode or None
"""
_annotate("option", value, is_default=is_default, label=label)
def options_only():
"""
Annotates to enforce the value to be one of the specified options.
"""
_annotate("options_only")
def allows(value_info):
"""
Annotate an allowed value info for a collection.
@param value_info: an allowed value for the collection.
@type value_info: IValueInfo
"""
_annotate("allows", value_info)
def is_ordered(flag):
"""Annotate a collection to be ordered.
@param flag: if the collection order is important.
@type flag: bool
"""
_annotate("is_ordered", flag)
def min_size(size):
"""
Annotate a collection minimum size.
@param size: the collection minimum size.
@type size: int
"""
_annotate("min_size", size)
def max_size(size):
"""
Annotate a collection maximum size.
@param size: the collection maximum size.
@type size: int
"""
_annotate("max_size", size)
def _annotate(name, *args, **kwargs):
method_name = "annotate_" + name
annotate.injectClassCallback(name, 4, method_name, *args, **kwargs)
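# Editor's sketch (not part of the original module): the annotations above are
# meant to be called from the body of a value class defined further below, e.g.
#
#   class Color(String):
#       label("Colour")
#       desc("A colour name restricted to a fixed set of options")
#       option(u"red", is_default=True)
#       option(u"blue")
#       options_only()
#
# Color().validate(None) then returns the default u"red", while any value
# outside the declared options raises ValueError. The class name and options
# used here are illustrative only.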
class BaseValue(models_meta.Metadata):
implements(IValueInfo, IValidator)
_class_label = None
_class_desc = None
_class_value_type = None
_class_use_default = False
_class_default = None
### IValueInfo ###
@property
def label(self):
return self._class_label
@property
def desc(self):
return self._class_desc
@property
def value_type(self):
return self._class_value_type
@property
def use_default(self):
return self._class_use_default
@property
def default(self):
return self._class_default
def __eq__(self, other):
if not IValueInfo.providedBy(other):
return NotSupported
other = IValueInfo(other)
if self.value_type != other.value_type:
return False
if self.use_default != other.use_default:
return False
        if self.use_default and (self.default != other.default):
return False
if IValueOptions.providedBy(self) != IValueOptions.providedBy(other):
return False
if IValueOptions.providedBy(self):
other = IValueOptions(other)
other_options = set(other.iter_options())
self_options = set(self.iter_options())
if other_options != self_options:
return False
if self.is_restricted != other.is_restricted:
return False
if IValueRange.providedBy(self) != IValueRange.providedBy(other):
return False
if IValueRange.providedBy(self):
other = IValueRange(other)
if (self.minimum != other.minimum
or self.maximum != other.maximum
or self.increment != other.increment):
return False
return True
def __ne__(self, other):
eq = self.__eq__(other)
return eq if eq is NotSupported else not eq
### IValidator ###
def validate(self, value):
if value is None and self.use_default:
value = self.default
return value
def publish(self, value):
if value is None and self.use_default:
value = self.default
return value
def as_string(self, value):
return unicode(self.publish(value))
### annotations ###
@classmethod
def annotate_label(cls, label):
"""@see: feat.models.value.label"""
cls._class_label = label
@classmethod
def annotate_desc(cls, desc):
"""@see: feat.models.value.desc"""
cls._class_desc = desc
@classmethod
def annotate_value_type(cls, value_type):
"""@see: feat.models.value.value_type"""
if value_type not in ValueTypes:
raise ValueError(value_type)
cls._class_value_type = value_type
@classmethod
def annotate_default(cls, default):
"""@see: feat.models.value.default"""
cls._class_use_default = True
cls._class_default = default
class Binary(BaseValue):
implements(IEncodingInfo)
value_type(ValueTypes.binary)
def __init__(self, mime_type=None, encoding=None):
self._mime_type = mime_type
self._encoding = encoding
### IEncodingInfo ###
@property
def mime_type(self):
return self._mime_type
@property
def encoding(self):
return self._encoding
class InterfaceValue(BaseValue):
_value_interface = None
def __init__(self, value_interface=None):
if type(self)._value_interface is None:
self._value_interface = value_interface
def validate(self, value):
new_value = BaseValue.validate(self, value)
if not self._value_interface.providedBy(value):
raise ValueError(value)
return new_value
def publish(self, value):
new_value = BaseValue.publish(self, value)
if not self._value_interface.providedBy(value):
raise ValueError("%r does not provide %r interface" %
(value, self._value_interface))
return new_value
class Response(InterfaceValue):
"""Definition of a model value."""
_value_interface = IResponse
value_type(ValueTypes.model)
class Model(InterfaceValue):
"""Definition of a model value."""
_value_interface = IModel
value_type(ValueTypes.model)
class Reference(InterfaceValue):
"""Definition of a model value."""
_value_interface = IReference
value_type(ValueTypes.reference)
class Struct(BaseValue):
"""Definition of a model value."""
_value_interface = ISnapshotable
value_type(ValueTypes.struct)
class Value(BaseValue):
_class_options = None
_class_options_only = False
def __init__(self, *args, **kwargs):
label = self._class_label
desc = self._class_desc
self._label = unicode(label) if label is not None else None
self._desc = unicode(desc) if desc is not None else None
self._value_type = self._class_value_type
self._options_only = False
self._options = []
if self._class_options is not None:
for v, l in self._class_options:
self._add_option(v, l)
self._options_only = self._class_options_only
self._use_default = self._class_use_default
self._default = None
if self._use_default:
self._default = self._validate_default(self._class_default)
if "default" in kwargs:
if len(args) > 0:
raise ValueError("If the default value is specified "
"as a keyword, no argument are allowed")
self._set_default(kwargs.pop("default"))
else:
if len(args) > 1:
raise ValueError("Only default value is "
"supported as argument")
if len(args) > 0:
self._set_default(args[0])
if kwargs:
raise ValueError("Unsupported keyword arguments")
### IValueInfo ###
@property
def label(self):
return self._label
@property
def desc(self):
return self._desc
@property
def value_type(self):
return self._value_type
@property
def use_default(self):
return self._use_default
@property
def default(self):
return self._default
### IValidator ###
def validate(self, value):
value = BaseValue.validate(self, value)
if self._options_only and not self._has_option(value):
raise ValueError("Value not allowed: %r" % (value, ))
return value
def publish(self, value):
        value = BaseValue.publish(self, value)
if self._options_only and not self._has_option(value):
raise ValueError("Value not allowed: %r" % (value, ))
return value
### IValueOptions ###
@property
def is_restricted(self):
return self._options_only
def count_options(self):
return len(self._options)
def iter_options(self):
return iter(self._options)
def has_option(self, value):
try:
return self._has_option(self._validate_option(value))
except ValueError:
return False
def get_option(self, value):
value = unicode(value)
try:
return next((o for o in self._options if o.value == value))
except StopIteration:
return None
### protected ###
def _validate_default(self, value):
return self.validate(value)
def _validate_option(self, value):
return self.validate(value)
def _has_option(self, value):
try:
next((o for o in self._options if o.value == value))
return True
except StopIteration:
return False
def _set_default(self, default):
self._default = self._validate_default(default)
self._use_default = True
def _add_option(self, value, label=None):
# Disable options_only to be able to validate the value
options_only = self._options_only
self._options_only = False
try:
self._validate_option(value)
option = ValueOption(value, label)
self._options.append(option)
finally:
self._options_only = options_only
### annotations ###
@classmethod
def annotate_option(cls, value, is_default=False, label=None):
"""@see: feat.models.value.option"""
if cls._class_options is None:
cls._class_options = container.MroList("_mro_options")
classImplements(cls, IValueOptions)
if is_default:
cls._class_default = value
cls._class_use_default = True
cls._class_options.append((value, label))
@classmethod
def annotate_options_only(cls):
"""@see: feat.models.value.options_only"""
cls._class_options_only = True
class ValueOption(object):
"""Pair of value/label defining a possible option.
@see: feat.models.interface.IValueOption"""
implements(IValueOption)
def __init__(self, value, label=None):
self._value = value
self._label = unicode(label) if label is not None else unicode(value)
### IValueOption ###
@property
def value(self):
return self._value
@property
def label(self):
return self._label
def __eq__(self, other):
if not IValueOption.providedBy(other):
return False
return (self._value == other.value
and self._label == other.label)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._value) ^ hash(self._label)
class String(Value):
"""String value definition."""
value_type(ValueTypes.string)
### overridden ###
def validate(self, value):
"""
Accepts: str, unicode
Returns: unicode
"""
val = value
if isinstance(val, str):
#FIXME: unsafe decoding
val = unicode(value)
val = super(String, self).validate(val)
if not isinstance(val, unicode):
raise ValueError("Not a string: %r" % (value, ))
return val
def publish(self, value):
"""
Accepts: unicode, str
Returns: unicode
"""
val = value
if isinstance(val, str):
#FIXME: unsafe decoding
val = unicode(value)
val = super(String, self).publish(val)
if not isinstance(val, unicode):
raise ValueError("Not a string: %r" % (value, ))
return val
class Float(Value):
value_type(ValueTypes.number)
def validate(self, value):
"""
Accepts: float, int, long, str, unicode
Returns: float
"""
if isinstance(value, (str, unicode, int, long)):
value = float(value)
value = super(Float, self).validate(value)
        if not isinstance(value, float):
            raise ValueError("Not a float: %r" % (value, ))
return value
def publish(self, value):
"""
Accepts: float
Returns: float
"""
value = super(Float, self).publish(value)
if isinstance(value, int):
value = float(value)
return value
class Integer(Value):
"""Definition of an basic integer value."""
value_type(ValueTypes.integer)
### overridden ###
def validate(self, value):
"""
Accepts: int, long, str, unicode
Returns: int, long
"""
if isinstance(value, (str, unicode, float)):
value = int(value)
value = super(Integer, self).validate(value)
if not isinstance(value, (int, long)):
raise ValueError("Not an integer: %r" % (value, ))
return value
def publish(self, value):
"""
Accepts: int, long
Returns: int, long
"""
value = super(Integer, self).publish(value)
if isinstance(value, float):
value = int(value)
if not isinstance(value, (int, long)):
raise ValueError("Not an integer: %r" % (value, ))
return value
class Boolean(Value):
"""Definition of an basic integer value."""
value_type(ValueTypes.boolean)
option(True, label="True")
option(False, label="False")
options_only()
### overridden ###
def validate(self, value):
"""
Accepts: str, unicode, bool
Returns: bool
"""
if isinstance(value, bool):
return value
if isinstance(value, (str, unicode)):
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
raise ValueError("Not a boolean: %r" % (value, ))
value = super(Boolean, self).validate(value)
if not isinstance(value, bool):
raise ValueError("Not a boolean: %r" % (value, ))
return value
def publish(self, value):
value = super(Boolean, self).publish(value)
if not isinstance(value, bool):
raise ValueError("Not a boolean: %r" % (value, ))
return value
class Enum(Value):
"""Definition of integer value with a fixed
set of possible values taken from an enumeration."""
value_type(ValueTypes.string)
options_only()
implements(IValueOptions)
def __init__(self, enum, *args, **kwargs):
self._enum = enum
Value.__init__(self, *args, **kwargs)
for i in enum:
self._add_option(i)
### IValidator ###
def validate(self, value):
if value is None and self._use_default:
value = self._default
if isinstance(value, (str, unicode, int)):
if value in self._enum:
return self._enum[value]
if isinstance(value, int):
if value in self._enum:
return unicode(self._enum[value].name)
raise ValueError(value)
def publish(self, value):
if value is None and self._use_default:
value = self._default
if isinstance(value, (str, unicode)):
if value in self._enum:
return unicode(value)
if isinstance(value, int):
if value in self._enum:
return unicode(self._enum[value].name)
raise ValueError(value)
### overridden ###
def _validate_option(self, value):
return unicode(self.validate(value).name)
def _add_option(self, value, label=None):
if isinstance(value, self._enum):
value = unicode(value.name)
return Value._add_option(self, value, label)
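# Editor's sketch (illustrative, not part of the original module), assuming an
# enumeration type compatible with feat's usage (int-based members that support
# name lookup and expose a .name attribute):
#
#   status = Enum(JobStatus, default=JobStatus.active)
#   status.validate("active")         # -> the JobStatus.active member
#   status.publish(JobStatus.active)  # -> u"active"
#
# Every member of the enumeration is registered as an allowed option, so any
# other value raises ValueError.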
class FixedValues(Value):
'''
    String value restricted to one of the defined options.
    Use: FixedValues(["option1", "option2", ...])
'''
value_type(ValueTypes.string)
options_only()
implements(IValueOptions)
def __init__(self, values, *args, **kwargs):
Value.__init__(self, *args, **kwargs)
for v in values:
self._add_option(v)
class Structure(Value):
implements(IValueList)
value_type(ValueTypes.struct)
_fields = container.MroList("_mro_fields")
def validate(self, value):
if not isinstance(value, dict):
raise ValueError("Expected dictionary, got %r" % (value, ))
fields = self.fields
params = set(value.keys())
expected = set([p.name for p in fields])
required = set([p.name for p in fields if p.is_required])
missing = required - params
if missing:
raise MissingParameters("", params=missing)
unknown = params - expected
if unknown:
raise UnknownParameters("", params=unknown)
param_index = dict([(p.name, p) for p in fields])
validated = {}
errors = {}
for param_name, param_value in value.iteritems():
param_name = str(param_name)
info = param_index[param_name].value_info
try:
valval = IValidator(info).validate(param_value)
validated[param_name] = valval
except ValueError, e:
errors[param_name] = e
if errors:
raise InvalidParameters("", params=errors)
for param in fields:
if not param.is_required:
info = param.value_info
if param.name not in validated and info.use_default:
validated[str(param.name)] = info.default
return validated
def publish(self, value):
def getter(value, name):
try:
if isinstance(value, dict):
return value[name]
else:
return getattr(value, name)
except (KeyError, AttributeError) as e:
raise ValueError(str(e))
result = dict()
for field in self.fields:
try:
v = getter(value, field.name)
result[field.name] = field.value_info.publish(v)
except ValueError:
if field.is_required:
raise
if field.value_info.use_default:
result[field.name] = field.value_info.publish(
field.value_info.default)
return result
### IValueList ###
@property
def fields(self):
inverted_result = []
already_added = set()
for p in reversed(self._fields):
if p.name not in already_added:
inverted_result.append(p)
already_added.add(p.name)
return list(reversed(inverted_result))
### annotations ###
@classmethod
def annotate_param(cls, name, value_info, is_required=True,
label=None, desc=None):
name = unicode(name)
param = action.Param(name, value_info, is_required=is_required,
label=label, desc=desc)
cls._fields.append(param)
field = action.param
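# Editor's sketch of a structure declaration (the class and field names are
# illustrative, not part of the original module):
#
#   class Endpoint(Structure):
#       field("host", String(), is_required=True)
#       field("port", Integer(80), is_required=False)
#
#   Endpoint().validate({"host": u"example.com"})
#   # -> {"host": u"example.com", "port": 80}
#
# Missing optional fields fall back to their defaults, while missing required,
# unknown or invalid parameters raise the corresponding errors.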
class MetaCollection(type(Value)):
@staticmethod
def new(name, allowed_types=[], min_size=None, max_size=None,
is_ordered=True):
cls = MetaCollection(name, (Collection, ), {})
for value_info in allowed_types:
cls.annotate_allows(value_info)
cls.annotate_is_ordered(is_ordered)
if min_size is not None:
cls.annotate_min_size(min_size)
if max_size is not None:
cls.annotate_max_size(max_size)
return cls
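# Editor's sketch (illustrative, not part of the original module):
# MetaCollection.new builds a Collection subclass on the fly, e.g.
#
#   Tags = MetaCollection.new("Tags", allowed_types=[String()],
#                             min_size=1, max_size=5)
#   Tags().validate([u"a", u"b"])   # -> [u"a", u"b"]
#
# Values that are strings, are not iterable, fall outside the size bounds, or
# fail to validate against every allowed type raise ValueError.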
class Collection(Value):
implements(IValueCollection)
_class_allowed_types = container.MroList("_mro_allowed_types")
_class_is_ordered = True
_class_min_size = None
_class_max_size = None
value_type(ValueTypes.collection)
### IValueCollection ###
@property
def allowed_types(self):
return list(self._class_allowed_types)
@property
def is_ordered(self):
return self._class_is_ordered
@property
def min_size(self):
return self._class_min_size
@property
def max_size(self):
return self._class_max_size
### overridden ###
def validate(self, value):
return self._convert(value, "validate")
def publish(self, value):
return self._convert(value, "publish")
### annotations ###
@classmethod
def annotate_allows(cls, value_info):
"""@see: feat.models.value.allows"""
value_info = _validate_value_info(value_info)
cls._class_allowed_types.append(value_info)
@classmethod
def annotate_is_ordered(cls, flag):
"""@see: feat.models.value.is_ordered"""
cls._class_is_ordered = _validate_flag(flag)
@classmethod
def annotate_min_size(cls, size):
"""@see: feat.models.value.min_size"""
cls._class_min_size = _validate_size(size)
@classmethod
def annotate_max_size(cls, size):
"""@see: feat.models.value.max_size"""
cls._class_max_size = _validate_size(size)
### private ###
def _convert(self, value, method_name):
if isinstance(value, (str, unicode)):
raise ValueError(value)
try:
all_values = list(value)
except TypeError:
raise ValueError(value)
result = []
if self._class_min_size is not None:
if len(all_values) < self._class_min_size:
raise ValueError(value)
if self._class_max_size is not None:
if len(all_values) > self._class_max_size:
raise ValueError(value)
allowed_types = list(self._class_allowed_types)
for v in all_values:
for allowed in allowed_types:
try:
result.append(getattr(allowed, method_name)(v))
break
except (ValueError, InvalidParameters), e:
continue
else:
raise ValueError(value)
return result
### private ###
def _validate_value_info(value_info):
return IValueInfo(value_info)
def _validate_size(size):
return int(size)
def _validate_flag(flag):
return bool(flag)
| gpl-2.0 | 6,844,360,681,269,452,000 | 26.606291 | 78 | 0.591011 | false |
beiko-lab/gengis | bin/Lib/site-packages/numpy/ma/mrecords.py | 1 | 28557 | """:mod:`numpy.ma..mrecords`
Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
where fields can be accessed as attributes.
Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
and the masking of individual fields.
:author: Pierre Gerard-Marchant
"""
#!!!: * We should make sure that no field is called '_mask','mask','_fieldmask',
#!!!: or whatever restricted keywords.
#!!!: An idea would be to not bother in the first place, and then rename the
#!!!: invalid fields with a trailing underscore...
#!!!: Maybe we could just overload the parser function ?
__author__ = "Pierre GF Gerard-Marchant"
import sys
import numpy as np
from numpy import bool_, dtype, \
ndarray, recarray, array as narray
import numpy.core.numerictypes as ntypes
from numpy.core.records import fromarrays as recfromarrays, \
fromrecords as recfromrecords
_byteorderconv = np.core.records._byteorderconv
_typestr = ntypes._typestr
import numpy.ma as ma
from numpy.ma import MAError, MaskedArray, masked, nomask, masked_array, \
getdata, getmaskarray, filled
_check_fill_value = ma.core._check_fill_value
import warnings
__all__ = ['MaskedRecords', 'mrecarray',
'fromarrays', 'fromrecords', 'fromtextfile', 'addfield',
]
reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
def _getformats(data):
"Returns the formats of each array of arraylist as a comma-separated string."
if hasattr(data, 'dtype'):
return ",".join([desc[1] for desc in data.dtype.descr])
formats = ''
for obj in data:
obj = np.asarray(obj)
formats += _typestr[obj.dtype.type]
if issubclass(obj.dtype.type, ntypes.flexible):
formats += `obj.itemsize`
formats += ','
return formats[:-1]
def _checknames(descr, names=None):
"""Checks that the field names of the descriptor ``descr`` are not some
reserved keywords. If this is the case, a default 'f%i' is substituted.
If the argument `names` is not None, updates the field names to valid names.
"""
ndescr = len(descr)
default_names = ['f%i' % i for i in range(ndescr)]
if names is None:
new_names = default_names
else:
if isinstance(names, (tuple, list)):
new_names = names
elif isinstance(names, str):
new_names = names.split(',')
else:
raise NameError("illegal input names %s" % `names`)
nnames = len(new_names)
if nnames < ndescr:
new_names += default_names[nnames:]
ndescr = []
for (n, d, t) in zip(new_names, default_names, descr.descr):
if n in reserved_fields:
if t[0] in reserved_fields:
ndescr.append((d, t[1]))
else:
ndescr.append(t)
else:
ndescr.append((n, t[1]))
return np.dtype(ndescr)
def _get_fieldmask(self):
mdescr = [(n, '|b1') for n in self.dtype.names]
fdmask = np.empty(self.shape, dtype=mdescr)
fdmask.flat = tuple([False] * len(mdescr))
return fdmask
class MaskedRecords(MaskedArray, object):
"""
*IVariables*:
_data : {recarray}
Underlying data, as a record array.
_mask : {boolean array}
Mask of the records. A record is masked when all its fields are masked.
_fieldmask : {boolean recarray}
Record array of booleans, setting the mask of each individual field of each record.
_fill_value : {record}
Filling values for each field.
"""
#............................................
def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False,
mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
copy=False,
**options):
#
self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
strides=strides, formats=formats, names=names,
titles=titles, byteorder=byteorder,
aligned=aligned,)
#
mdtype = ma.make_mask_descr(self.dtype)
if mask is nomask or not np.size(mask):
if not keep_mask:
self._mask = tuple([False] * len(mdtype))
else:
mask = np.array(mask, copy=copy)
if mask.shape != self.shape:
(nd, nm) = (self.size, mask.size)
if nm == 1:
mask = np.resize(mask, self.shape)
elif nm == nd:
mask = np.reshape(mask, self.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MAError(msg % (nd, nm))
copy = True
if not keep_mask:
self.__setmask__(mask)
self._sharedmask = True
else:
if mask.dtype == mdtype:
_mask = mask
else:
_mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
self._mask = _mask
return self
#......................................................
def __array_finalize__(self, obj):
# Make sure we have a _fieldmask by default ..
_mask = getattr(obj, '_mask', None)
if _mask is None:
objmask = getattr(obj, '_mask', nomask)
_dtype = ndarray.__getattribute__(self, 'dtype')
if objmask is nomask:
_mask = ma.make_mask_none(self.shape, dtype=_dtype)
else:
mdescr = ma.make_mask_descr(_dtype)
_mask = narray([tuple([m] * len(mdescr)) for m in objmask],
dtype=mdescr).view(recarray)
# Update some of the attributes
_dict = self.__dict__
_dict.update(_mask=_mask)
self._update_from(obj)
if _dict['_baseclass'] == ndarray:
_dict['_baseclass'] = recarray
return
def _getdata(self):
"Returns the data as a recarray."
return ndarray.view(self, recarray)
_data = property(fget=_getdata)
def _getfieldmask(self):
"Alias to mask"
return self._mask
_fieldmask = property(fget=_getfieldmask)
def __len__(self):
"Returns the length"
# We have more than one record
if self.ndim:
return len(self._data)
# We have only one record: return the nb of fields
return len(self.dtype)
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError: # attr must be a fieldname
pass
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
# So far, so good...
_localdict = ndarray.__getattribute__(self, '__dict__')
_data = ndarray.view(self, _localdict['_baseclass'])
obj = _data.getfield(*res)
if obj.dtype.fields:
raise NotImplementedError("MaskedRecords is currently limited to"\
"simple records...")
# Get some special attributes
# Reset the object's mask
hasmasked = False
_mask = _localdict.get('_mask', None)
if _mask is not None:
try:
_mask = _mask[attr]
except IndexError:
# Couldn't find a mask: use the default (nomask)
pass
hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any()
if (obj.shape or hasmasked):
obj = obj.view(MaskedArray)
obj._baseclass = ndarray
obj._isfield = True
obj._mask = _mask
# Reset the field values
_fill_value = _localdict.get('_fill_value', None)
if _fill_value is not None:
try:
obj._fill_value = _fill_value[attr]
except ValueError:
obj._fill_value = None
else:
obj = obj.item()
return obj
def __setattr__(self, attr, val):
"Sets the attribute attr to the value val."
# Should we call __setmask__ first ?
if attr in ['mask', 'fieldmask']:
self.__setmask__(val)
return
# Create a shortcut (so that we don't have to call getattr all the time)
_localdict = object.__getattribute__(self, '__dict__')
# Check whether we're creating a new field
newattr = attr not in _localdict
try:
# Is attr a generic attribute ?
ret = object.__setattr__(self, attr, val)
except:
# Not a generic attribute: exit if it's not a valid field
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
if not (attr in fielddict or attr in optinfo):
exctype, value = sys.exc_info()[:2]
raise exctype, value
else:
# Get the list of names ......
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
# Check the attribute
if attr not in fielddict:
return ret
if newattr: # We just added this one
try: # or this setattr worked on an internal
# attribute.
object.__delattr__(self, attr)
except:
return ret
# Let's try to set the field
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
#
if val is masked:
_fill_value = _localdict['_fill_value']
if _fill_value is not None:
dval = _localdict['_fill_value'][attr]
else:
dval = val
mval = True
else:
dval = filled(val)
mval = getmaskarray(val)
obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
_localdict['_mask'].__setitem__(attr, mval)
return obj
def __getitem__(self, indx):
"""Returns all the fields sharing the same fieldname base.
The fieldname base is either `_data` or `_mask`."""
_localdict = self.__dict__
_mask = ndarray.__getattribute__(self, '_mask')
_data = ndarray.view(self, _localdict['_baseclass'])
# We want a field ........
if isinstance(indx, basestring):
#!!!: Make sure _sharedmask is True to propagate back to _fieldmask
#!!!: Don't use _set_mask, there are some copies being made...
#!!!: ...that break propagation
#!!!: Don't force the mask to nomask, that wrecks easy masking
obj = _data[indx].view(MaskedArray)
obj._mask = _mask[indx]
obj._sharedmask = True
fval = _localdict['_fill_value']
if fval is not None:
obj._fill_value = fval[indx]
# Force to masked if the mask is True
if not obj.ndim and obj._mask:
return masked
return obj
# We want some elements ..
# First, the data ........
obj = np.array(_data[indx], copy=False).view(mrecarray)
obj._mask = np.array(_mask[indx], copy=False).view(recarray)
return obj
#....
def __setitem__(self, indx, value):
"Sets the given record to value."
MaskedArray.__setitem__(self, indx, value)
if isinstance(indx, basestring):
self._mask[indx] = ma.getmaskarray(value)
def __str__(self):
"Calculates the string representation."
if self.size > 1:
mstr = ["(%s)" % ",".join([str(i) for i in s])
for s in zip(*[getattr(self, f) for f in self.dtype.names])]
return "[%s]" % ", ".join(mstr)
else:
mstr = ["%s" % ",".join([str(i) for i in s])
for s in zip([getattr(self, f) for f in self.dtype.names])]
return "(%s)" % ", ".join(mstr)
#
def __repr__(self):
"Calculates the repr representation."
_names = self.dtype.names
fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
reprstr.insert(0, 'masked_records(')
reprstr.extend([fmt % (' fill_value', self.fill_value),
' )'])
return str("\n".join(reprstr))
    #......................................................
def view(self, dtype=None, type=None):
"""Returns a view of the mrecarray."""
# OK, basic copy-paste from MaskedArray.view...
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
# Here again...
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
# OK, there's the change
except TypeError:
dtype = np.dtype(dtype)
# we need to revert to MaskedArray, but keeping the possibility
# ...of subclasses (eg, TimeSeriesRecords), so we'll force a type
# ...set to the first parent
if dtype.fields is None:
basetype = self.__class__.__bases__[0]
output = self.__array__().view(dtype, basetype)
output._update_from(self)
else:
output = ndarray.view(self, dtype)
output._fill_value = None
else:
output = ndarray.view(self, dtype, type)
# Update the mask, just like in MaskedArray.view
if (getattr(output, '_mask', nomask) is not nomask):
mdtype = ma.make_mask_descr(output.dtype)
output._mask = self._mask.view(mdtype, ndarray)
output._mask.shape = output.shape
return output
def harden_mask(self):
"Forces the mask to hard"
self._hardmask = True
def soften_mask(self):
"Forces the mask to soft"
self._hardmask = False
def copy(self):
"""Returns a copy of the masked record."""
_localdict = self.__dict__
copied = self._data.copy().view(type(self))
copied._mask = self._mask.copy()
return copied
def tolist(self, fill_value=None):
"""Copy the data portion of the array to a hierarchical python
list and returns that list.
Data items are converted to the nearest compatible Python
type. Masked values are converted to fill_value. If
fill_value is None, the corresponding entries in the output
list will be ``None``.
"""
if fill_value is not None:
return self.filled(fill_value).tolist()
result = narray(self.filled().tolist(), dtype=object)
mask = narray(self._mask.tolist())
result[mask] = None
return result.tolist()
#--------------------------------------------
# Pickling
def __getstate__(self):
"""Return the internal state of the masked array, for pickling purposes.
"""
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
self._data.tostring(),
self._mask.tostring(),
self._fill_value,
)
return state
#
def __setstate__(self, state):
"""Restore the internal state of the masked array, for pickling purposes.
``state`` is typically the output of the ``__getstate__`` output, and is a
5-tuple:
- class name
- a tuple giving the shape of the data
- a typecode for the data
- a binary string for the data
- a binary string for the mask.
"""
(ver, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
self.fill_value = flv
#
def __reduce__(self):
"""Return a 3-tuple for pickling a MaskedArray.
"""
return (_mrreconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
"""Internal function that builds a new MaskedArray from the
information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
# _data._mask = ndarray.__new__(ndarray, baseshape, 'b1')
# return _data
_mask = ndarray.__new__(ndarray, baseshape, 'b1')
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
mrecarray = MaskedRecords
#####---------------------------------------------------------------------------
#---- --- Constructors ---
#####---------------------------------------------------------------------------
def fromarrays(arraylist, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None,
fill_value=None):
"""Creates a mrecarray from a (flat) list of masked arrays.
Parameters
----------
arraylist : sequence
A list of (masked) arrays. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None, integer}, optional
Number of records. If None, shape is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
datalist = [getdata(x) for x in arraylist]
masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
_array = recfromarrays(datalist,
dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles, aligned=aligned,
byteorder=byteorder).view(mrecarray)
_array._mask.flat = zip(*masklist)
if fill_value is not None:
_array.fill_value = fill_value
return _array
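# Editor's example (illustrative, not part of the original module):
#
#   >>> from numpy import ma
#   >>> x = ma.array([1, 2, 3], mask=[0, 1, 0])
#   >>> y = ma.array([3.3, 4.4, 5.5], mask=[0, 0, 1])
#   >>> rec = fromarrays([x, y], names='a,b')
#   >>> rec.a
#   masked_array(data = [1 -- 3], ...)
#
# The mask of each output field follows the mask of the corresponding input
# array, record by record.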
#..............................................................................
def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None,
fill_value=None, mask=nomask):
"""Creates a MaskedRecords from a list of records.
Parameters
----------
reclist : sequence
A list of records. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None,int}, optional
Number of records. If None, ``shape`` is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
mask : {nomask, sequence}, optional.
External mask to apply on the data.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
# Grab the initial _fieldmask, if needed:
_mask = getattr(reclist, '_mask', None)
# Get the list of records.....
try:
nfields = len(reclist[0])
except TypeError:
nfields = len(reclist[0].dtype)
if isinstance(reclist, ndarray):
# Make sure we don't have some hidden mask
if isinstance(reclist, MaskedArray):
reclist = reclist.filled().view(ndarray)
# Grab the initial dtype, just in case
if dtype is None:
dtype = reclist.dtype
reclist = reclist.tolist()
mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles,
aligned=aligned, byteorder=byteorder).view(mrecarray)
# Set the fill_value if needed
if fill_value is not None:
mrec.fill_value = fill_value
# Now, let's deal w/ the mask
if mask is not nomask:
mask = np.array(mask, copy=False)
maskrecordlength = len(mask.dtype)
if maskrecordlength:
mrec._mask.flat = mask
elif len(mask.shape) == 2:
mrec._mask.flat = [tuple(m) for m in mask]
else:
mrec.__setmask__(mask)
if _mask is not None:
mrec._mask[:] = _mask
return mrec
def _guessvartypes(arr):
"""Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise
conversion. Returns a list of dtypes.
The array is first converted to ndarray. If the array is 2D, the test is performed
    on the first line. An exception is raised if the array is 3D or more.
"""
vartypes = []
arr = np.asarray(arr)
if len(arr.shape) == 2 :
arr = arr[0]
elif len(arr.shape) > 2:
raise ValueError("The array should be 2D at most!")
# Start the conversion loop .......
for f in arr:
try:
int(f)
except ValueError:
try:
float(f)
except ValueError:
try:
val = complex(f)
except ValueError:
vartypes.append(arr.dtype)
else:
vartypes.append(np.dtype(complex))
else:
vartypes.append(np.dtype(float))
else:
vartypes.append(np.dtype(int))
return vartypes
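# Editor's illustration (hypothetical input; the exact dtype names depend on
# the platform and numpy version):
#
#   >>> _guessvartypes(np.array(['1', '2.5', '1+2j', 'spam']))
#   [dtype('int64'), dtype('float64'), dtype('complex128'), dtype('S4')]
#
# i.e. each column falls back from int to float to complex to the original
# string dtype.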
def openfile(fname):
"Opens the file handle of file `fname`"
# A file handle ...................
if hasattr(fname, 'readline'):
return fname
# Try to open the file and guess its type
try:
f = open(fname)
except IOError:
raise IOError("No such file: '%s'" % fname)
if f.readline()[:2] != "\\x":
f.seek(0, 0)
return f
f.close()
raise NotImplementedError("Wow, binary file")
def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
varnames=None, vartypes=None):
"""Creates a mrecarray from data stored in the file `filename`.
Parameters
----------
filename : {file name/handle}
Handle of an opened file.
delimitor : {None, string}, optional
Alphanumeric character used to separate columns in the file.
        If None, any (group of) whitespace string(s) will be used.
commentchar : {'#', string}, optional
Alphanumeric character used to mark the start of a comment.
missingchar : {'', string}, optional
String indicating missing data, and used to create the masks.
varnames : {None, sequence}, optional
Sequence of the variable names. If None, a list will be created from
the first non empty line of the file.
vartypes : {None, sequence}, optional
Sequence of the variables dtypes. If None, it will be estimated from
the first non-commented line.
Ultra simple: the varnames are in the header, one line"""
# Try to open the file ......................
f = openfile(fname)
# Get the first non-empty line as the varnames
while True:
line = f.readline()
firstline = line[:line.find(commentchar)].strip()
_varnames = firstline.split(delimitor)
if len(_varnames) > 1:
break
if varnames is None:
varnames = _varnames
# Get the data ..............................
_variables = masked_array([line.strip().split(delimitor) for line in f
if line[0] != commentchar and len(line) > 1])
(_, nfields) = _variables.shape
f.close()
# Try to guess the dtype ....................
if vartypes is None:
vartypes = _guessvartypes(_variables[0])
else:
vartypes = [np.dtype(v) for v in vartypes]
if len(vartypes) != nfields:
msg = "Attempting to %i dtypes for %i fields!"
msg += " Reverting to default."
warnings.warn(msg % (len(vartypes), nfields))
vartypes = _guessvartypes(_variables[0])
# Construct the descriptor ..................
mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
mfillv = [ma.default_fill_value(f) for f in vartypes]
# Get the data and the mask .................
# We just need a list of masked_arrays. It's easier to create it like that:
_mask = (_variables.T == missingchar)
_datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
return fromarrays(_datalist, dtype=mdescr)
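# Editor's illustration (hypothetical file contents, not part of the original
# module). Given a whitespace-delimited text file such as:
#
#   day temperature
#   1   12.5
#   2   --
#   3   13.1
#
# fromtextfile('data.txt', missingchar='--') returns a two-field mrecarray
# whose column dtypes are guessed by _guessvartypes and whose '--' cell is
# masked.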
#....................................................................
def addfield(mrecord, newfield, newfieldname=None):
"""Adds a new field to the masked record array, using `newfield` as data
and `newfieldname` as name. If `newfieldname` is None, the new field name is
set to 'fi', where `i` is the number of existing fields.
"""
_data = mrecord._data
_mask = mrecord._mask
if newfieldname is None or newfieldname in reserved_fields:
newfieldname = 'f%i' % len(_data.dtype)
newfield = ma.array(newfield)
# Get the new data ............
# Create a new empty recarray
newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
newdata = recarray(_data.shape, newdtype)
    # Add the existing fields
[newdata.setfield(_data.getfield(*f), *f)
for f in _data.dtype.fields.values()]
# Add the new field
newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
newdata = newdata.view(MaskedRecords)
# Get the new mask .............
# Create a new empty recarray
newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
newmask = recarray(_data.shape, newmdtype)
# Add the old masks
[newmask.setfield(_mask.getfield(*f), *f)
for f in _mask.dtype.fields.values()]
# Add the mask of the new field
newmask.setfield(getmaskarray(newfield),
*newmask.dtype.fields[newfieldname])
newdata._mask = newmask
return newdata
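# Editor's sketch (illustrative names, not part of the original module):
#
#   >>> base = fromarrays([ma.array([1, 2], mask=[0, 1])], names='a')
#   >>> extended = addfield(base, ma.array([10., 20.], mask=[1, 0]), 'b')
#   >>> extended.dtype.names
#   ('a', 'b')
#
# The new field carries its own mask alongside the masks of the existing
# fields.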
| gpl-3.0 | -4,077,217,941,820,372,500 | 37.226648 | 91 | 0.536366 | false |
yaybu/touchdown | touchdown/tests/test_aws_s3_file.py | 1 | 3062 | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.fixtures.aws import BucketFixture
from touchdown.tests.stubs.aws import S3FileStubber
class TestBucketCreation(StubberTestCase):
def test_create_bucket(self):
goal = self.create_goal("apply")
bucket = self.fixtures.enter_context(BucketFixture(goal, self.aws))
s3_file = self.fixtures.enter_context(
S3FileStubber(
goal.get_service(
bucket.bucket.add_file(name="my-file", contents="my-test-content"),
"apply",
)
)
)
s3_file.add_list_objects_empty_response()
s3_file.add_put_object()
s3_file.add_list_objects_one_response()
s3_file.add_list_objects_one_response()
s3_file.add_list_objects_one_response()
goal.execute()
def test_create_bucket_idempotent(self):
goal = self.create_goal("apply")
bucket = self.fixtures.enter_context(BucketFixture(goal, self.aws))
s3_file = self.fixtures.enter_context(
S3FileStubber(
goal.get_service(
bucket.bucket.add_file(name="my-file", contents="my-test-content"),
"apply",
)
)
)
s3_file.add_list_objects_one_response()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(s3_file.resource)), 0)
class TestBucketDeletion(StubberTestCase):
def test_delete_bucket(self):
goal = self.create_goal("destroy")
bucket = self.fixtures.enter_context(BucketFixture(goal, self.aws))
s3_file = self.fixtures.enter_context(
S3FileStubber(
goal.get_service(bucket.bucket.add_file(name="my-file"), "destroy")
)
)
s3_file.add_list_objects_one_response()
s3_file.add_delete_object()
goal.execute()
def test_delete_bucket_idempotent(self):
goal = self.create_goal("destroy")
bucket = self.fixtures.enter_context(BucketFixture(goal, self.aws))
s3_file = self.fixtures.enter_context(
S3FileStubber(
goal.get_service(bucket.bucket.add_file(name="my-file"), "destroy")
)
)
s3_file.add_list_objects_empty_response()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(s3_file.resource)), 0)
| apache-2.0 | -4,602,095,424,609,843,000 | 33.022222 | 87 | 0.629001 | false |
schleichdi2/OPENNFR-6.1-CORE | opennfr-openembedded-core/meta/lib/oeqa/utils/qemurunner.py | 1 | 24225 | # Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# This module provides a class for starting qemu images using runqemu.
# It's used by testimage.bbclass.
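# A rough usage sketch (editor's addition; the paths and values below are
# illustrative and are normally provided by the build environment):
#
#   runner = QemuRunner(machine='qemux86', rootfs='/path/to/image.ext4',
#                       display=None, tmpdir='/path/to/tmp',
#                       deploy_dir_image='/path/to/deploy/images/qemux86',
#                       logfile='/tmp/qemu_boot.log', boottime=300,
#                       dump_dir='/tmp/dumps', dump_host_cmds=[], use_kvm=False)
#   if runner.start():
#       status, output = runner.run_serial('uname -a')
#       runner.stop()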
import subprocess
import os
import sys
import time
import signal
import re
import socket
import select
import errno
import string
import threading
import codecs
from oeqa.utils.dump import HostDumper
import logging
logger = logging.getLogger("BitBake.QemuRunner")
logger.addHandler(logging.StreamHandler())
# Get Unicode non printable control chars
control_range = list(range(0,32))+list(range(127,160))
control_chars = [chr(x) for x in control_range
if chr(x) not in string.printable]
re_control_char = re.compile('[%s]' % re.escape("".join(control_chars)))
class QemuRunner:
def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime, dump_dir, dump_host_cmds, use_kvm):
# Popen object for runqemu
self.runqemu = None
# pid of the qemu process that runqemu will start
self.qemupid = None
# target ip - from the command line or runqemu output
self.ip = None
# host ip - where qemu is running
self.server_ip = None
# target ip netmask
self.netmask = None
self.machine = machine
self.rootfs = rootfs
self.display = display
self.tmpdir = tmpdir
self.deploy_dir_image = deploy_dir_image
self.logfile = logfile
self.boottime = boottime
self.logged = False
self.thread = None
self.use_kvm = use_kvm
self.runqemutime = 60
self.host_dumper = HostDumper(dump_host_cmds, dump_dir)
def create_socket(self):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
sock.bind(("127.0.0.1",0))
sock.listen(2)
port = sock.getsockname()[1]
logger.info("Created listening socket for qemu serial console on: 127.0.0.1:%s" % port)
return (sock, port)
except socket.error:
sock.close()
raise
def log(self, msg):
if self.logfile:
# It is needed to sanitize the data received from qemu
            # because it is possible to have control characters
msg = msg.decode("utf-8", errors='ignore')
msg = re_control_char.sub('', msg)
with codecs.open(self.logfile, "a", encoding="utf-8") as f:
f.write("%s" % msg)
def getOutput(self, o):
import fcntl
fl = fcntl.fcntl(o, fcntl.F_GETFL)
fcntl.fcntl(o, fcntl.F_SETFL, fl | os.O_NONBLOCK)
return os.read(o.fileno(), 1000000).decode("utf-8")
def handleSIGCHLD(self, signum, frame):
if self.runqemu and self.runqemu.poll():
if self.runqemu.returncode:
logger.info('runqemu exited with code %d' % self.runqemu.returncode)
logger.info("Output from runqemu:\n%s" % self.getOutput(self.runqemu.stdout))
self.stop()
self._dump_host()
raise SystemExit
def start(self, qemuparams = None, get_ip = True, extra_bootparams = None, runqemuparams='', launch_cmd=None, discard_writes=True):
if self.display:
os.environ["DISPLAY"] = self.display
# Set this flag so that Qemu doesn't do any grabs as SDL grabs
# interact badly with screensavers.
os.environ["QEMU_DONT_GRAB"] = "1"
if not os.path.exists(self.rootfs):
logger.error("Invalid rootfs %s" % self.rootfs)
return False
if not os.path.exists(self.tmpdir):
logger.error("Invalid TMPDIR path %s" % self.tmpdir)
return False
else:
os.environ["OE_TMPDIR"] = self.tmpdir
if not os.path.exists(self.deploy_dir_image):
logger.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
return False
else:
os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
if not launch_cmd:
launch_cmd = 'runqemu %s %s ' % ('snapshot' if discard_writes else '', runqemuparams)
if self.use_kvm:
logger.info('Using kvm for runqemu')
launch_cmd += ' kvm'
else:
logger.info('Not using kvm for runqemu')
if not self.display:
launch_cmd += ' nographic'
launch_cmd += ' %s %s' % (self.machine, self.rootfs)
return self.launch(launch_cmd, qemuparams=qemuparams, get_ip=get_ip, extra_bootparams=extra_bootparams)
def launch(self, launch_cmd, get_ip = True, qemuparams = None, extra_bootparams = None):
try:
threadsock, threadport = self.create_socket()
self.server_socket, self.serverport = self.create_socket()
except socket.error as msg:
logger.error("Failed to create listening socket: %s" % msg[1])
return False
bootparams = 'console=tty1 console=ttyS0,115200n8 printk.time=1'
if extra_bootparams:
bootparams = bootparams + ' ' + extra_bootparams
self.qemuparams = 'bootparams="{0}" qemuparams="-serial tcp:127.0.0.1:{1}"'.format(bootparams, threadport)
if qemuparams:
self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'
launch_cmd += ' tcpserial=%s %s' % (self.serverport, self.qemuparams)
self.origchldhandler = signal.getsignal(signal.SIGCHLD)
signal.signal(signal.SIGCHLD, self.handleSIGCHLD)
logger.info('launchcmd=%s'%(launch_cmd))
# FIXME: We pass in stdin=subprocess.PIPE here to work around stty
# blocking at the end of the runqemu script when using this within
# oe-selftest (this makes stty error out immediately). There ought
# to be a proper fix but this will suffice for now.
self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp)
output = self.runqemu.stdout
#
# We need the preexec_fn above so that all runqemu processes can easily be killed
# (by killing their process group). This presents a problem if this controlling
# process itself is killed however since those processes don't notice the death
# of the parent and merrily continue on.
#
# Rather than hack runqemu to deal with this, we add something here instead.
# Basically we fork off another process which holds an open pipe to the parent
# and also is setpgrp. If/when the pipe sees EOF from the parent dieing, it kills
        # the process group. This is like prctl's PDEATHSIG but for a process group
# rather than a single process.
#
r, w = os.pipe()
self.monitorpid = os.fork()
if self.monitorpid:
os.close(r)
self.monitorpipe = os.fdopen(w, "w")
else:
# child process
os.setpgrp()
os.close(w)
r = os.fdopen(r)
x = r.read()
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
sys.exit(0)
logger.info("runqemu started, pid is %s" % self.runqemu.pid)
logger.info("waiting at most %s seconds for qemu pid" % self.runqemutime)
endtime = time.time() + self.runqemutime
while not self.is_alive() and time.time() < endtime:
if self.runqemu.poll():
if self.runqemu.returncode:
# No point waiting any longer
logger.info('runqemu exited with code %d' % self.runqemu.returncode)
self._dump_host()
self.stop()
logger.info("Output from runqemu:\n%s" % self.getOutput(output))
return False
time.sleep(1)
out = self.getOutput(output)
netconf = False # network configuration is not required by default
if self.is_alive():
logger.info("qemu started - qemu procces pid is %s" % self.qemupid)
if get_ip:
cmdline = ''
with open('/proc/%s/cmdline' % self.qemupid) as p:
cmdline = p.read()
# It is needed to sanitize the data received
                # because it is possible to have control characters
cmdline = re_control_char.sub('', cmdline)
try:
ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
self.ip = ips[0]
self.server_ip = ips[1]
logger.info("qemu cmdline used:\n{}".format(cmdline))
except (IndexError, ValueError):
# Try to get network configuration from runqemu output
match = re.match('.*Network configuration: ([0-9.]+)::([0-9.]+):([0-9.]+)$.*',
out, re.MULTILINE|re.DOTALL)
if match:
self.ip, self.server_ip, self.netmask = match.groups()
# network configuration is required as we couldn't get it
# from the runqemu command line, so qemu doesn't run kernel
# and guest networking is not configured
netconf = True
else:
logger.error("Couldn't get ip from qemu command line and runqemu output! "
"Here is the qemu command line used:\n%s\n"
"and output from runqemu:\n%s" % (cmdline, out))
self._dump_host()
self.stop()
return False
logger.info("Target IP: %s" % self.ip)
logger.info("Server IP: %s" % self.server_ip)
self.thread = LoggingThread(self.log, threadsock, logger)
self.thread.start()
if not self.thread.connection_established.wait(self.boottime):
logger.error("Didn't receive a console connection from qemu. "
"Here is the qemu command line used:\n%s\nand "
"output from runqemu:\n%s" % (cmdline, out))
self.stop_thread()
return False
logger.info("Output from runqemu:\n%s", out)
logger.info("Waiting at most %d seconds for login banner" % self.boottime)
endtime = time.time() + self.boottime
socklist = [self.server_socket]
reachedlogin = False
stopread = False
qemusock = None
bootlog = ''
data = b''
while time.time() < endtime and not stopread:
try:
sread, swrite, serror = select.select(socklist, [], [], 5)
except InterruptedError:
continue
for sock in sread:
if sock is self.server_socket:
qemusock, addr = self.server_socket.accept()
qemusock.setblocking(0)
socklist.append(qemusock)
socklist.remove(self.server_socket)
logger.info("Connection from %s:%s" % addr)
else:
data = data + sock.recv(1024)
if data:
try:
data = data.decode("utf-8", errors="surrogateescape")
bootlog += data
data = b''
if re.search(".* login:", bootlog):
self.server_socket = qemusock
stopread = True
reachedlogin = True
logger.info("Reached login banner")
except UnicodeDecodeError:
continue
else:
socklist.remove(sock)
sock.close()
stopread = True
if not reachedlogin:
logger.info("Target didn't reached login boot in %d seconds" % self.boottime)
lines = "\n".join(bootlog.splitlines()[-25:])
logger.info("Last 25 lines of text:\n%s" % lines)
logger.info("Check full boot log: %s" % self.logfile)
self._dump_host()
self.stop()
return False
            # Even if we are not able to log in, the tests can continue
try:
(status, output) = self.run_serial("root\n", raw=True)
if re.search("root@[a-zA-Z0-9\-]+:~#", output):
self.logged = True
logger.info("Logged as root in serial console")
if netconf:
# configure guest networking
cmd = "ifconfig eth0 %s netmask %s up\n" % (self.ip, self.netmask)
output = self.run_serial(cmd, raw=True)[1]
if re.search("root@[a-zA-Z0-9\-]+:~#", output):
logger.info("configured ip address %s", self.ip)
else:
logger.info("Couldn't configure guest networking")
else:
logger.info("Couldn't login into serial console"
" as root using blank password")
except:
logger.info("Serial console failed while trying to login")
else:
logger.info("Qemu pid didn't appeared in %s seconds" % self.runqemutime)
self._dump_host()
self.stop()
logger.info("Output from runqemu:\n%s" % self.getOutput(output))
return False
return self.is_alive()
def stop(self):
self.stop_thread()
self.stop_qemu_system()
if hasattr(self, "origchldhandler"):
signal.signal(signal.SIGCHLD, self.origchldhandler)
if self.runqemu:
if hasattr(self, "monitorpid"):
os.kill(self.monitorpid, signal.SIGKILL)
logger.info("Sending SIGTERM to runqemu")
try:
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
endtime = time.time() + self.runqemutime
while self.runqemu.poll() is None and time.time() < endtime:
time.sleep(1)
if self.runqemu.poll() is None:
logger.info("Sending SIGKILL to runqemu")
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
self.runqemu = None
if hasattr(self, 'server_socket') and self.server_socket:
self.server_socket.close()
self.server_socket = None
self.qemupid = None
self.ip = None
def stop_qemu_system(self):
if self.qemupid:
try:
# qemu-system behaves well and a SIGTERM is enough
os.kill(self.qemupid, signal.SIGTERM)
except ProcessLookupError as e:
logger.warn('qemu-system ended unexpectedly')
def stop_thread(self):
if self.thread and self.thread.is_alive():
self.thread.stop()
self.thread.join()
def restart(self, qemuparams = None):
logger.info("Restarting qemu process")
if self.runqemu.poll() is None:
self.stop()
if self.start(qemuparams):
return True
return False
def is_alive(self):
if not self.runqemu:
return False
qemu_child = self.find_child(str(self.runqemu.pid))
if qemu_child:
self.qemupid = qemu_child[0]
if os.path.exists("/proc/" + str(self.qemupid)):
return True
return False
def find_child(self,parent_pid):
#
        # Walk the process tree from the specified process looking for a qemu-system. Return its [pid, cmd].
#
ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
processes = ps.decode("utf-8").split('\n')
nfields = len(processes[0].split()) - 1
pids = {}
commands = {}
for row in processes[1:]:
data = row.split(None, nfields)
if len(data) != 3:
continue
if data[1] not in pids:
pids[data[1]] = []
pids[data[1]].append(data[0])
commands[data[0]] = data[2]
if parent_pid not in pids:
return []
parents = []
newparents = pids[parent_pid]
while newparents:
next = []
for p in newparents:
if p in pids:
for n in pids[p]:
if n not in parents and n not in next:
next.append(n)
if p not in parents:
parents.append(p)
newparents = next
#print("Children matching %s:" % str(parents))
for p in parents:
# Need to be careful here since runqemu runs "ldd qemu-system-xxxx"
# Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx"
basecmd = commands[p].split()[0]
basecmd = os.path.basename(basecmd)
if "qemu-system" in basecmd and "-serial tcp" in commands[p]:
return [int(p),commands[p]]
def run_serial(self, command, raw=False, timeout=5):
        # We assume the target system has echo so we can get the command status
if not raw:
command = "%s; echo $?\n" % command
data = ''
status = 0
self.server_socket.sendall(command.encode('utf-8'))
start = time.time()
end = start + timeout
while True:
now = time.time()
if now >= end:
data += "<<< run_serial(): command timed out after %d seconds without output >>>\r\n\r\n" % timeout
break
try:
sread, _, _ = select.select([self.server_socket],[],[], end - now)
except InterruptedError:
continue
if sread:
answer = self.server_socket.recv(1024)
if answer:
data += answer.decode('utf-8')
# Search the prompt to stop
if re.search("[a-zA-Z0-9]+@[a-zA-Z0-9\-]+:~#", data):
break
else:
raise Exception("No data on serial console socket")
if data:
if raw:
status = 1
else:
# Remove first line (command line) and last line (prompt)
data = data[data.find('$?\r\n')+4:data.rfind('\r\n')]
index = data.rfind('\r\n')
if index == -1:
status_cmd = data
data = ""
else:
status_cmd = data[index+2:]
data = data[:index]
if (status_cmd == "0"):
status = 1
return (status, str(data))
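    # Illustrative sketch (not part of the original class): after the image has
    # booted and self.logged is True, run_serial() executes a command on the
    # target over the serial console. "runner" and the command are examples.
    #
    #   status, output = runner.run_serial("uname -a")
    #   if status:
    #       logger.info("Target kernel: %s" % output)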
def _dump_host(self):
self.host_dumper.create_dir("qemu")
logger.warn("Qemu ended unexpectedly, dump data from host"
" is in %s" % self.host_dumper.dump_dir)
self.host_dumper.dump_host()
# This class is for reading data from a socket and passing it to logfunc
# to be processed. It's completely event driven and has a straightforward
# event loop. The mechanism for stopping the thread is a simple pipe which
# will wake up the poll and allow for tearing everything down.
class LoggingThread(threading.Thread):
def __init__(self, logfunc, sock, logger):
self.connection_established = threading.Event()
self.serversock = sock
self.logfunc = logfunc
self.logger = logger
self.readsock = None
self.running = False
self.errorevents = select.POLLERR | select.POLLHUP | select.POLLNVAL
self.readevents = select.POLLIN | select.POLLPRI
threading.Thread.__init__(self, target=self.threadtarget)
def threadtarget(self):
try:
self.eventloop()
finally:
self.teardown()
def run(self):
self.logger.info("Starting logging thread")
self.readpipe, self.writepipe = os.pipe()
threading.Thread.run(self)
def stop(self):
self.logger.info("Stopping logging thread")
if self.running:
os.write(self.writepipe, bytes("stop", "utf-8"))
def teardown(self):
self.logger.info("Tearing down logging thread")
self.close_socket(self.serversock)
if self.readsock is not None:
self.close_socket(self.readsock)
self.close_ignore_error(self.readpipe)
self.close_ignore_error(self.writepipe)
self.running = False
def eventloop(self):
poll = select.poll()
event_read_mask = self.errorevents | self.readevents
poll.register(self.serversock.fileno())
poll.register(self.readpipe, event_read_mask)
breakout = False
self.running = True
self.logger.info("Starting thread event loop")
while not breakout:
events = poll.poll()
for event in events:
# An error occurred, bail out
if event[1] & self.errorevents:
raise Exception(self.stringify_event(event[1]))
# Event to stop the thread
if self.readpipe == event[0]:
self.logger.info("Stop event received")
breakout = True
break
# A connection request was received
elif self.serversock.fileno() == event[0]:
self.logger.info("Connection request received")
self.readsock, _ = self.serversock.accept()
self.readsock.setblocking(0)
poll.unregister(self.serversock.fileno())
poll.register(self.readsock.fileno(), event_read_mask)
self.logger.info("Setting connection established event")
self.connection_established.set()
# Actual data to be logged
elif self.readsock.fileno() == event[0]:
data = self.recv(1024)
self.logfunc(data)
# Since the socket is non-blocking make sure to honor EAGAIN
# and EWOULDBLOCK.
def recv(self, count):
try:
data = self.readsock.recv(count)
except socket.error as e:
if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK:
return ''
else:
raise
if data is None:
raise Exception("No data on read ready socket")
elif not data:
# This actually means an orderly shutdown
# happened. But for this code it counts as an
# error since the connection shouldn't go away
# until qemu exits.
raise Exception("Console connection closed unexpectedly")
return data
def stringify_event(self, event):
val = ''
if select.POLLERR == event:
            val = 'POLLERR'
elif select.POLLHUP == event:
val = 'POLLHUP'
elif select.POLLNVAL == event:
val = 'POLLNVAL'
return val
def close_socket(self, sock):
sock.shutdown(socket.SHUT_RDWR)
sock.close()
def close_ignore_error(self, fd):
try:
os.close(fd)
except OSError:
pass
| gpl-2.0 | 2,719,740,783,573,503,000 | 39.107616 | 159 | 0.533375 | false |
Orpheus11/nile | nile/common/lockutils.py | 1 | 3733 | import threading
import weakref
import contextlib
import logging
import fasteners
import os
LOG = logging.getLogger(__name__)
class Semaphores(object):
def __init__(self):
self._semaphores = weakref.WeakValueDictionary()
self._lock = threading.Lock()
def get(self, name):
with self._lock:
try:
return self._semaphores[name]
except KeyError:
sem = threading.Semaphore()
self._semaphores[name] = sem
return sem
def __len__(self):
return len(self._semaphores)
_semaphores = Semaphores()
InterProcessLock = fasteners.InterProcessLock
ReaderWriterLock = fasteners.ReaderWriterLock
def internal_lock(name, semaphores=None):
if semaphores is None:
semaphores = _semaphores
return semaphores.get(name)
def external_lock(name, lock_file_prefix=None, lock_path=None):
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
return InterProcessLock(lock_file_path)
def _get_lock_path(name, lock_file_prefix, lock_path=None):
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path
if not local_lock_path:
        # raise cfg.RequiredOptError('lock_path')
        # A bare ``raise`` with no active exception fails at runtime, so raise
        # a concrete error instead.
        raise ValueError('lock_path must be set when using external locks')
return os.path.join(local_lock_path, name)
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None,
do_log=True, semaphores=None, delay=0.01):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The path in which to store external lock files. For
external locking to work properly, this must be the same for all
references to the lock.
:param do_log: Whether to log acquire/release messages. This is primarily
intended to reduce log message duplication when `lock` is used from the
`synchronized` decorator.
:param semaphores: Container that provides semaphores to use when locking.
This ensures that threads inside the same application can not collide,
due to the fact that external process locks are unaware of a processes
active threads.
:param delay: Delay between acquisition attempts (in seconds).
.. versionchanged:: 0.2
Added *do_log* optional parameter.
.. versionchanged:: 0.3
Added *delay* and *semaphores* optional parameters.
"""
int_lock = internal_lock(name, semaphores=semaphores)
with int_lock:
if do_log:
LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name})
try:
if external:
ext_lock = external_lock(name, lock_file_prefix, lock_path)
ext_lock.acquire(delay=delay)
try:
yield ext_lock
finally:
ext_lock.release()
else:
yield int_lock
finally:
if do_log:
LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name})
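

# Illustrative usage sketch (not part of the original module). The resource
# name, prefix and temporary lock_path below are hypothetical; they only show
# how the ``lock`` context manager is meant to be called.
if __name__ == "__main__":
    import tempfile

    demo_lock_path = tempfile.mkdtemp()
    with lock("demo-resource", lock_file_prefix="nile",
              external=True, lock_path=demo_lock_path):
        # Both the per-process semaphore and the inter-process file lock are
        # held while this block runs.
        LOG.debug("protected critical section")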
| apache-2.0 | -4,795,306,328,737,834,000 | 33.247706 | 78 | 0.646397 | false |
Fokko/incubator-airflow | tests/test_utils/mock_operators.py | 1 | 1355 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import NamedTuple
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
# Namedtuple for testing purposes
class MockNamedTuple(NamedTuple):
var1: str
var2: str
class MockOperator(BaseOperator):
"""Operator for testing purposes."""
template_fields = ("arg1", "arg2")
@apply_defaults
def __init__(self, arg1: str = "", arg2: str = "", **kwargs):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
def execute(self, context):
pass
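

# Illustrative usage sketch (not part of the original module): this operator is
# meant to be instantiated in tests, optionally with templated arguments, e.g.
#
#   task = MockOperator(task_id="mock_task", arg1="{{ ds }}", arg2="static")
#
# The task_id and the template string above are examples, not values taken from
# the real test suite.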
| apache-2.0 | 3,725,370,474,415,175,700 | 31.261905 | 65 | 0.723985 | false |
FedoraScientific/salome-paravis | test/VisuPrs/ImportMedField/B3.py | 1 | 1389 | # Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# This case corresponds to: /visu/ImportMedField/B3 case
# Import MED file; create presentations for the given fields.
from paravistest import datadir, Import_Med_Field
import pvserver as paravis
med_file = datadir + "zzzz121b.med"
field_names = ["RESUZERODEPL____________________", "RESUZEROERRE_ELGA_NORE__________", "RESUZEROSIEF_ELGA_DEPL__________", "RESUZEROSIGM_ELNO_DEPL__________"]
prs_list = [ [0,1,5,6,7], [0,1,5,6,7], [0,1,5,6,7], [0,1,5,6,7,9] ]
Import_Med_Field(paravis.myParavis, med_file, field_names, 1, prs_list)
| lgpl-2.1 | 7,929,004,548,466,227,000 | 45.3 | 158 | 0.718503 | false |
Greymerk/python-rpg | src/world/terrain/chunkmanager.py | 1 | 1344 | from random import choice
from mapcache import MapCache
from chunk import Chunk
class ChunkManager:
def __init__(self, world):
self.world = world
self.chunkCache = []
self.mapCache = MapCache(self, self.world.seed)
self.maxCacheSize = 64
def getChunk(self, x, y):
chunkX = int(x) >> 4
chunkY = int(y) >> 4
for c in self.chunkCache:
if c.getPos() == (chunkX, chunkY):
return c
toLoad = Chunk((chunkX, chunkY), self.world.getSeed(), self.world.mobManager, self.mapCache)
self.chunkCache.append(toLoad)
if len(self.chunkCache) > self.maxCacheSize:
            toUnload = self.chunkCache.pop(0)  # chunkCache is a list; lists have no popleft()
toUnload.unload()
return toLoad
def getMap(self, x, y):
return self.mapCache.get(x, y)
def getTile(self, pos):
x = int(pos[0])
y = int(pos[1])
c = self.getChunk(x, y)
return c.getTile(x % Chunk.size, y % Chunk.size)
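    # Worked example (illustrative, assuming Chunk.size == 16 to match the
    # ``>> 4`` shifts above): a tile at world position (35, -7) lives in chunk
    # (35 >> 4, -7 >> 4) == (2, -1), at local coordinates
    # (35 % 16, -7 % 16) == (3, 9) within that chunk.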
def isLoaded(self, x, y):
for c in self.chunkCache:
            if c.pos == (x, y):  # compare tuples by value, not identity
return True
return False
def setTile(self, (x, y), id):
c = self.getChunk(x, y)
c.setTile((x, y), id)
def saveChunks(self):
for c in self.chunkCache:
c.unload()
def getRandomChunk(self):
return choice(self.chunkCache)
def cull(self, center, dist):
for c in self.chunkCache:
if c.getDistToChunk(center) > dist:
c.unload()
self.chunkCache.remove(c)
| gpl-3.0 | -1,728,710,524,475,288,800 | 19.676923 | 94 | 0.65253 | false |
daniel-dinu/rational-python | test_rational/test_rational.py | 1 | 32468 | import unittest2
from unittest2 import TestCase
from rational.rational import gcd
from rational.rational import Rational
__author__ = 'Daniel Dinu'
class TestRational(TestCase):
def setUp(self):
self.known_values = [(1, 2, 1, 2),
(-1, 2, -1, 2),
(1, -2, -1, 2),
(-1, -2, 1, 2),
(2, 4, 1, 2),
(-2, 4, -1, 2),
(2, -4, -1, 2),
(-2, -4, 1, 2),
(2, 1, 2, 1),
(-2, 1, -2, 1),
(2, -1, -2, 1),
(-2, -1, 2, 1),
(4, 2, 2, 1),
(-4, 2, -2, 1),
(4, -2, -2, 1),
(-4, -2, 2, 1)]
def tearDown(self):
del self.known_values
def test_constructor_numerator_type_error(self):
self.assertRaises(TypeError, Rational, 1.2)
def test_constructor_denominator_type_error(self):
self.assertRaises(TypeError, Rational, 1, 1.2)
def test_constructor_denominator_zero_division_error(self):
numerator = 1
denominator = 0
with self.subTest(numerator=numerator, denominator=denominator):
self.assertRaises(ZeroDivisionError, Rational, numerator, denominator)
numerator = Rational()
denominator = 0
with self.subTest(numerator=numerator, denominator=denominator):
self.assertRaises(ZeroDivisionError, Rational, numerator, denominator)
numerator = Rational()
denominator = Rational()
with self.subTest(numerator=numerator, denominator=denominator):
self.assertRaises(ZeroDivisionError, Rational, numerator, denominator)
def test_constructor_numerator(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = Rational(numerator, denominator)
self.assertEqual(expected_numerator, r.numerator)
def test_constructor_denominator(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = Rational(numerator, denominator)
self.assertEqual(expected_denominator, r.denominator)
def test_constructor_transform(self):
test_constructor_transform_values = [(Rational(1, 2), Rational(1, 2), Rational(1)),
(Rational(1, 2), Rational(1, 4), Rational(2)),
(Rational(1, 4), Rational(1, 2), Rational(1, 2)),
(Rational(-1, 2), Rational(1, 2), Rational(-1)),
(Rational(-1, 2), Rational(1, 4), Rational(-2)),
(Rational(-1, 4), Rational(1, 2), Rational(-1, 2)),
(Rational(1, 2), Rational(-1, 2), Rational(-1)),
(Rational(1, 2), Rational(-1, 4), Rational(-2)),
(Rational(1, 4), Rational(-1, 2), Rational(-1, 2)),
(Rational(-1, 2), Rational(-1, 2), Rational(1)),
(Rational(-1, 2), Rational(-1, 4), Rational(2)),
(Rational(-1, 4), Rational(-1, 2), Rational(1, 2))]
for a, b, expected_result in test_constructor_transform_values:
with self.subTest(a=a, b=b, expected_result=expected_result):
computed_result = Rational(a, b)
self.assertEqual(expected_result, computed_result)
def test_transform(self):
test_transform_values = [(1, 2, (1, 2)),
(2, 4, (2, 4)),
(-1, 2, (-1, 2)),
(-2, 4, (-2, 4)),
(1, -2, (1, -2)),
(2, -4, (2, -4)),
(-1, -2, (-1, -2)),
(-2, -4, (-2, -4)),
(Rational(1, 2), 1, (1, 2)),
(Rational(1, 2), 2, (1, 4)),
(Rational(-1, 2), 1, (-1, 2)),
(Rational(-1, 2), 2, (-1, 4)),
(Rational(1, -2), 1, (-1, 2)),
(Rational(1, -2), 2, (-1, 4)),
(Rational(1, 2), -1, (1, -2)),
(Rational(1, 2), -2, (1, -4)),
(Rational(-1, 2), -1, (-1, -2)),
(Rational(-1, 2), -2, (-1, -4)),
(1, Rational(1, 2), (2, 1)),
(2, Rational(1, 2), (4, 1)),
(-1, Rational(1, 2), (-2, 1)),
(-2, Rational(1, 2), (-4, 1)),
(1, Rational(-1, 2), (2, -1)),
(2, Rational(-1, 2), (4, -1)),
(1, Rational(1, -2), (2, -1)),
(2, Rational(1, -2), (4, -1)),
(-1, Rational(1, 2), (-2, 1)),
(-2, Rational(1, 2), (-4, 1)),
(Rational(1, 2), Rational(1, 2), (2, 2)),
(Rational(1, 2), Rational(1, 4), (4, 2)),
(Rational(1, 4), Rational(1, 2), (2, 4)),
(Rational(-1, 2), Rational(1, 2), (-2, 2)),
(Rational(-1, 2), Rational(1, 4), (-4, 2)),
(Rational(-1, 4), Rational(1, 2), (-2, 4)),
(Rational(1, 2), Rational(-1, 2), (2, -2)),
(Rational(1, 2), Rational(-1, 4), (4, -2)),
(Rational(1, 4), Rational(-1, 2), (2, -4)),
(Rational(-1, 2), Rational(-1, 2), (-2, -2)),
(Rational(-1, 2), Rational(-1, 4), (-4, -2)),
(Rational(-1, 4), Rational(-1, 2), (-2, -4))]
for a, b, expected_result in test_transform_values:
with self.subTest(a=a, b=b, expected_result=expected_result):
computed_result = Rational.transform(a, b)
self.assertEqual(expected_result, computed_result)
def test_gcd(self):
gcd_test_values = [(0, 0, 0),
(0, 1, 1),
(1, 0, 1),
(0, -1, -1),
(-1, 0, -1),
(2, 4, 2),
(-2, 4, 2),
(-2, -4, -2),
(42, 30, 6),
(42, -30, -6),
(-42, -30, -6)]
for a, b, expected_gcd in gcd_test_values:
with self.subTest(a=a, b=b, expected_gcd=expected_gcd):
computed_gcd = gcd(a, b)
self.assertEqual(expected_gcd, computed_gcd)
def test_value(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = Rational(numerator, denominator)
expected_value = expected_numerator / (expected_denominator * 1.0)
self.assertEqual(expected_value, r.value)
def test_quotient(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = Rational(numerator, denominator)
expected_value = expected_numerator // expected_denominator
self.assertEqual(expected_value, r.quotient)
def test_remainder(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = Rational(numerator, denominator)
expected_value = expected_numerator % expected_denominator
self.assertEqual(expected_value, r.remainder)
def test_str(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = Rational(numerator, denominator)
if 1 == expected_denominator:
expected_str = '{0}'.format(expected_numerator)
else:
expected_str = '{0}/{1}'.format(expected_numerator, expected_denominator)
self.assertEqual(expected_str, str(r))
def test_repr(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = Rational(numerator, denominator)
expected_repr = 'Rational({0}, {1})'.format(expected_numerator, expected_denominator)
self.assertEqual(expected_repr, repr(r))
def test_float(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = Rational(numerator, denominator)
expected_value = expected_numerator / (expected_denominator * 1.0)
self.assertEqual(expected_value, float(r))
def test_int(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = Rational(numerator, denominator)
expected_value = expected_numerator // expected_denominator
self.assertEqual(expected_value, int(r))
def test_neg(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = -Rational(numerator, denominator)
self.assertEqual(-expected_numerator, r.numerator)
self.assertEqual(expected_denominator, r.denominator)
def test_pos(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = +Rational(numerator, denominator)
self.assertEqual(expected_numerator, r.numerator)
self.assertEqual(expected_denominator, r.denominator)
def test_abs(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = abs(Rational(numerator, denominator))
self.assertEqual(abs(expected_numerator), r.numerator)
self.assertEqual(expected_denominator, r.denominator)
def test_invert_zero_division_error(self):
r = Rational(0)
with self.assertRaises(ZeroDivisionError):
~r
def test_invert(self):
for numerator, denominator, expected_numerator, expected_denominator in self.known_values:
with self.subTest(numerator=numerator, denominator=denominator):
r = ~Rational(numerator, denominator)
if 0 > expected_numerator:
expected_inverted_numerator = -expected_denominator
expected_inverted_denominator = -expected_numerator
else:
expected_inverted_numerator = expected_denominator
expected_inverted_denominator = expected_numerator
self.assertEqual(expected_inverted_numerator, r.numerator)
self.assertEqual(expected_inverted_denominator, r.denominator)
def test_lt(self):
true_test_cases = [(Rational(-1, 2), Rational()),
(Rational(), Rational(1, 2)),
(Rational(-1, 2), Rational(1, 2)),
(Rational(1, 4), Rational(1, 2)),
(Rational(-1, 2), Rational(-1, 4))]
false_test_cases = [(Rational(), Rational()),
(Rational(1, 2), Rational()),
(Rational(), Rational(-1, 2)),
(Rational(-1, 2), Rational(1, -2)),
(Rational(1, 2), Rational(2, 4)),
(Rational(1, 2), Rational(-1, 2)),
(Rational(1, 2), Rational(1, 4)),
(Rational(-1, 4), Rational(-1, 2))]
for r1, r2 in true_test_cases:
with self.subTest(r1=r1, r2=r2, result=True):
self.assertTrue(r1 < r2)
for r1, r2 in false_test_cases:
with self.subTest(r1=r1, r2=r2, result=False):
self.assertFalse(r1 < r2)
def test_le(self):
true_test_cases = [(Rational(), Rational()),
(Rational(-1, 2), Rational()),
(Rational(), Rational(1, 2)),
(Rational(-1, 2), Rational(1, -2)),
(Rational(1, 2), Rational(2, 4)),
(Rational(-1, 2), Rational(1, 2)),
(Rational(1, 4), Rational(1, 2)),
(Rational(-1, 2), Rational(-1, 4))]
false_test_cases = [(Rational(1, 2), Rational()),
(Rational(), Rational(-1, 2)),
(Rational(1, 2), Rational(-1, 2)),
(Rational(1, 2), Rational(1, 4)),
(Rational(-1, 4), Rational(-1, 2))]
for r1, r2 in true_test_cases:
with self.subTest(r1=r1, r2=r2, result=True):
self.assertTrue(r1 <= r2)
for r1, r2 in false_test_cases:
with self.subTest(r1=r1, r2=r2, result=False):
self.assertFalse(r1 <= r2)
def test_eq(self):
true_test_cases = [(Rational(), Rational()),
(Rational(-1, 2), Rational(1, -2)),
(Rational(1, 2), Rational(2, 4))]
false_test_cases = [(Rational(-1, 2), Rational()),
(Rational(), Rational(1, 2)),
(Rational(1, 2), Rational()),
(Rational(), Rational(-1, 2)),
(Rational(-1, 2), Rational(1, 2)),
(Rational(1, 4), Rational(1, 2)),
(Rational(-1, 2), Rational(-1, 4)),
(Rational(1, 2), Rational(-1, 2)),
(Rational(1, 2), Rational(1, 4)),
(Rational(-1, 4), Rational(-1, 2))]
for r1, r2 in true_test_cases:
with self.subTest(r1=r1, r2=r2, result=True):
self.assertTrue(r1 == r2)
for r1, r2 in false_test_cases:
with self.subTest(r1=r1, r2=r2, result=False):
self.assertFalse(r1 == r2)
def test_ne(self):
true_test_cases = [(Rational(-1, 2), Rational()),
(Rational(), Rational(1, 2)),
(Rational(1, 2), Rational()),
(Rational(), Rational(-1, 2)),
(Rational(-1, 2), Rational(1, 2)),
(Rational(1, 4), Rational(1, 2)),
(Rational(-1, 2), Rational(-1, 4)),
(Rational(1, 2), Rational(-1, 2)),
(Rational(1, 2), Rational(1, 4)),
(Rational(-1, 4), Rational(-1, 2))]
false_test_cases = [(Rational(), Rational()),
(Rational(-1, 2), Rational(1, -2)),
(Rational(1, 2), Rational(2, 4))]
for r1, r2 in true_test_cases:
with self.subTest(r1=r1, r2=r2, result=True):
self.assertTrue(r1 != r2)
for r1, r2 in false_test_cases:
with self.subTest(r1=r1, r2=r2, result=False):
self.assertFalse(r1 != r2)
def test_ge(self):
true_test_cases = [(Rational(), Rational()),
(Rational(1, 2), Rational()),
(Rational(), Rational(-1, 2)),
(Rational(-1, 2), Rational(1, -2)),
(Rational(1, 2), Rational(2, 4)),
(Rational(1, 2), Rational(-1, 2)),
(Rational(1, 2), Rational(1, 4)),
(Rational(-1, 4), Rational(-1, 2))]
false_test_cases = [(Rational(-1, 2), Rational()),
(Rational(), Rational(1, 2)),
(Rational(-1, 2), Rational(1, 2)),
(Rational(1, 4), Rational(1, 2)),
(Rational(-1, 2), Rational(-1, 4))]
for r1, r2 in true_test_cases:
with self.subTest(r1=r1, r2=r2, result=True):
self.assertTrue(r1 >= r2)
for r1, r2 in false_test_cases:
with self.subTest(r1=r1, r2=r2, result=False):
self.assertFalse(r1 >= r2)
def test_gt(self):
true_test_cases = [(Rational(1, 2), Rational()),
(Rational(), Rational(-1, 2)),
(Rational(1, 2), Rational(-1, 2)),
(Rational(1, 2), Rational(1, 4)),
(Rational(-1, 4), Rational(-1, 2))]
false_test_cases = [(Rational(), Rational()),
(Rational(-1, 2), Rational()),
(Rational(), Rational(1, 2)),
(Rational(-1, 2), Rational(1, -2)),
(Rational(1, 2), Rational(2, 4)),
(Rational(-1, 2), Rational(1, 2)),
(Rational(1, 4), Rational(1, 2)),
(Rational(-1, 2), Rational(-1, 4))]
for r1, r2 in true_test_cases:
with self.subTest(r1=r1, r2=r2, result=True):
self.assertTrue(r1 > r2)
for r1, r2 in false_test_cases:
with self.subTest(r1=r1, r2=r2, result=False):
self.assertFalse(r1 > r2)
def test_add_type_error(self):
r = Rational()
with self.assertRaises(TypeError):
r + 1.2
def test_add(self):
add_test_values = [(Rational(), Rational(1, 2), Rational(1, 2)),
(Rational(1, 2), Rational(), Rational(1, 2)),
(Rational(1, 2), Rational(1, 2), Rational(1, 1)),
(Rational(1, 2), Rational(-1, 2), Rational(0, 1)),
(Rational(1, 4), Rational(2, 4), Rational(3, 4)),
(Rational(1, 4), Rational(3, 4), Rational(1, 1)),
(Rational(1, 4), Rational(-3, 4), Rational(-1, 2)),
(Rational(1, 2), Rational(1, 3), Rational(5, 6)),
(Rational(2), -1, Rational(1)),
(Rational(2), 1, Rational(3))]
for r1, r2, expected_r in add_test_values:
with self.subTest(r1=r1, r2=r2, expected_r=expected_r):
r = r1 + r2
self.assertEqual(expected_r, r)
def test_sub_type_error(self):
r = Rational()
with self.assertRaises(TypeError):
r - 1.2
def test_sub(self):
sub_test_values = [(Rational(), Rational(1, 2), Rational(-1, 2)),
(Rational(1, 2), Rational(), Rational(1, 2)),
(Rational(1, 2), Rational(1, 2), Rational(0, 1)),
(Rational(1, 2), Rational(-1, 2), Rational(1, 1)),
(Rational(1, 4), Rational(2, 4), Rational(-1, 4)),
(Rational(1, 4), Rational(3, 4), Rational(-1, 2)),
(Rational(1, 4), Rational(-3, 4), Rational(1, 1)),
(Rational(1, 2), Rational(1, 3), Rational(1, 6)),
(Rational(2), -1, Rational(3)),
(Rational(2), 1, Rational(1))]
for r1, r2, expected_r in sub_test_values:
with self.subTest(r1=r1, r2=r2, expected_r=expected_r):
r = r1 - r2
self.assertEqual(expected_r, r)
def test_mul_type_error(self):
r = Rational()
with self.assertRaises(TypeError):
r * 1.2
def test_mul(self):
mul_test_values = [(Rational(), Rational(1, 2), Rational()),
(Rational(1, 2), Rational(), Rational()),
(Rational(1, 2), Rational(1, 2), Rational(1, 4)),
(Rational(1, 2), Rational(-1, 2), Rational(-1, 4)),
(Rational(1, 4), Rational(2, 4), Rational(1, 8)),
(Rational(1, 4), Rational(3, 4), Rational(3, 16)),
(Rational(1, 4), Rational(-3, 4), Rational(-3, 16)),
(Rational(1, 2), Rational(1, 3), Rational(1, 6)),
(Rational(2), 1, Rational(2)),
(Rational(2), -1, Rational(-2))]
for r1, r2, expected_r in mul_test_values:
with self.subTest(r1=r1, r2=r2, expected_r=expected_r):
r = r1 * r2
self.assertEqual(expected_r, r)
def test_truediv_zero_division_error(self):
r1 = Rational(1, 2)
r2 = Rational()
with self.assertRaises(ZeroDivisionError):
r1 / r2
def test_truediv_type_error(self):
r = Rational()
with self.assertRaises(TypeError):
r / 1.2
def test_truediv(self):
div_test_values = [(Rational(), Rational(1, 2), Rational()),
(Rational(1, 2), Rational(1, 2), Rational(1, 1)),
(Rational(1, 2), Rational(-1, 2), Rational(-1, 1)),
(Rational(1, 4), Rational(2, 4), Rational(1, 2)),
(Rational(1, 4), Rational(3, 4), Rational(1, 3)),
(Rational(1, 4), Rational(-3, 4), Rational(-1, 3)),
(Rational(1, 2), Rational(1, 3), Rational(3, 2)),
(Rational(2), 1, Rational(2)),
(Rational(2), -1, Rational(-2))]
for r1, r2, expected_r in div_test_values:
with self.subTest(r1=r1, r2=r2, expected_r=expected_r):
r = r1 / r2
self.assertEqual(expected_r, r)
def test_pow_zero_division_error(self):
r = Rational()
for power in range(-3, 0):
with self.subTest(r=r, power=power):
with self.assertRaises(ZeroDivisionError):
r ** power
def test_pow_type_error(self):
r = Rational()
with self.assertRaises(TypeError):
r ** 1.2
def test_pow(self):
pow_test_values = [(Rational(), 0, Rational()),
(Rational(), 1, Rational()),
(Rational(), 2, Rational()),
(Rational(), 3, Rational()),
(Rational(1, 2), -3, Rational(8, 1)),
(Rational(1, 2), -2, Rational(4, 1)),
(Rational(1, 2), -1, Rational(2, 1)),
(Rational(1, 2), 0, Rational(1, 1)),
(Rational(1, 2), 1, Rational(1, 2)),
(Rational(1, 2), 2, Rational(1, 4)),
(Rational(1, 2), 3, Rational(1, 8)),
(Rational(-1, 2), -3, Rational(-8, 1)),
(Rational(-1, 2), -2, Rational(4, 1)),
(Rational(-1, 2), -1, Rational(-2, 1)),
(Rational(-1, 2), 0, Rational(1, 1)),
(Rational(-1, 2), 1, Rational(-1, 2)),
(Rational(-1, 2), 2, Rational(1, 4)),
(Rational(-1, 2), 3, Rational(-1, 8)),
(Rational(1, 3), -3, Rational(27, 1)),
(Rational(1, 3), -2, Rational(9, 1)),
(Rational(1, 3), -1, Rational(3, 1)),
(Rational(1, 3), 0, Rational(1, 1)),
(Rational(1, 3), 1, Rational(1, 3)),
(Rational(1, 3), 2, Rational(1, 9)),
(Rational(1, 3), 3, Rational(1, 27)),
(Rational(-1, 3), -3, Rational(-27, 1)),
(Rational(-1, 3), -2, Rational(9, 1)),
(Rational(-1, 3), -1, Rational(-3, 1)),
(Rational(-1, 3), 0, Rational(1, 1)),
(Rational(-1, 3), 1, Rational(-1, 3)),
(Rational(-1, 3), 2, Rational(1, 9)),
(Rational(-1, 3), 3, Rational(-1, 27))]
for r1, power, expected_r in pow_test_values:
with self.subTest(r1=r1, power=power, expected_r=expected_r):
r = r1 ** power
self.assertEqual(expected_r, r)
def test_radd_type_error(self):
r = Rational()
with self.assertRaises(TypeError):
1.2 + r
def test_radd(self):
radd_test_values = [(1, Rational(1, 2), Rational(3, 2)),
(1, Rational(), Rational(1, 1)),
(-1, Rational(1, 2), Rational(-1, 2)),
(1, Rational(-1, 2), Rational(1, 2)),
(1, Rational(2, 4), Rational(3, 2)),
(1, Rational(3, 4), Rational(7, 4)),
(1, Rational(-3, 4), Rational(1, 4)),
(1, Rational(1, 3), Rational(4, 3))]
for r1, r2, expected_r in radd_test_values:
with self.subTest(r1=r1, r2=r2, expected_r=expected_r):
r = r1 + r2
self.assertEqual(expected_r, r)
def test_rsub_type_error(self):
r = Rational()
with self.assertRaises(TypeError):
1.2 - r
def test_rsub(self):
rsub_test_values = [(1, Rational(1, 2), Rational(1, 2)),
(1, Rational(), Rational(1, 1)),
(-1, Rational(1, 2), Rational(-3, 2)),
(1, Rational(-1, 2), Rational(3, 2)),
(1, Rational(2, 4), Rational(1, 2)),
(1, Rational(3, 4), Rational(1, 4)),
(1, Rational(-3, 4), Rational(7, 4)),
(1, Rational(1, 3), Rational(2, 3))]
for r1, r2, expected_r in rsub_test_values:
with self.subTest(r1=r1, r2=r2, expected_r=expected_r):
r = r1 - r2
self.assertEqual(expected_r, r)
def test_rmul_type_error(self):
r = Rational()
with self.assertRaises(TypeError):
1.2 * r
def test_rmul(self):
rmul_test_values = [(1, Rational(1, 2), Rational(1, 2)),
(1, Rational(), Rational(0, 1)),
(-1, Rational(1, 2), Rational(-1, 2)),
(1, Rational(-1, 2), Rational(-1, 2)),
(1, Rational(2, 4), Rational(1, 2)),
(1, Rational(3, 4), Rational(3, 4)),
(1, Rational(-3, 4), Rational(-3, 4)),
(1, Rational(1, 3), Rational(1, 3))]
for r1, r2, expected_r in rmul_test_values:
with self.subTest(r1=r1, r2=r2, expected_r=expected_r):
r = r1 * r2
self.assertEqual(expected_r, r)
def test_rtruediv_zero_division_error(self):
r = Rational()
with self.assertRaises(ZeroDivisionError):
1 / r
def test_rtruediv_type_error(self):
r = Rational()
with self.assertRaises(TypeError):
1.2 / r
def test_rtruediv(self):
rdiv_test_values = [(1, Rational(1, 2), Rational(2, 1)),
(-1, Rational(1, 2), Rational(-2, 1)),
(1, Rational(-1, 2), Rational(-2, 1)),
(1, Rational(2, 4), Rational(2, 1)),
(1, Rational(3, 4), Rational(4, 3)),
(1, Rational(-3, 4), Rational(-4, 3)),
(1, Rational(1, 3), Rational(3, 1))]
for r1, r2, expected_r in rdiv_test_values:
with self.subTest(r1=r1, r2=r2, expected_r=expected_r):
r = r1 / r2
self.assertEqual(expected_r, r)
def test_rpow_zero_division_error(self):
base = 0
for denominator in range(-3, 0):
power = Rational(1, denominator)
with self.subTest(base=base, power=power):
with self.assertRaises(ZeroDivisionError):
base ** power
def test_rpow_value_error(self):
rpow_test_values = [(-2, Rational(1, 2)),
(-1, Rational(1, 2)),
(-3, Rational(-1, 2)),
(-2, Rational(-1, 2)),
(-1, Rational(-1, 2)),
(-3, Rational(1, 3)),
(-2, Rational(1, 3)),
(-1, Rational(1, 3)),
(-3, Rational(-1, 3)),
(-2, Rational(-1, 3)),
(-1, Rational(-1, 3))]
for base, power in rpow_test_values:
with self.subTest(base=base, power=power):
with self.assertRaises(ValueError):
base ** power
def test_rpow(self):
rpow_test_values = [(0, Rational(), 1),
(1, Rational(), 1),
(2, Rational(), 1),
(3, Rational(), 1),
(0, Rational(1, 2), 0),
(1, Rational(1, 2), 1),
(2, Rational(1, 2), 1.4142135623730951),
(3, Rational(1, 2), 1.7320508075688772),
(1, Rational(-1, 2), 1),
(2, Rational(-1, 2), 0.7071067811865476),
(3, Rational(-1, 2), 0.5773502691896257),
(0, Rational(1, 3), 0),
(1, Rational(1, 3), 1),
(2, Rational(1, 3), 1.2599210498948732),
(3, Rational(1, 3), 1.4422495703074083),
(1, Rational(-1, 3), 1),
(2, Rational(-1, 3), 0.7937005259840998),
(3, Rational(-1, 3), 0.6933612743506348),
(-1, Rational(1), -1),
(-2, Rational(1), -2),
(-1, Rational(-1), -1),
(-2, Rational(-2), 0.25)]
for base, power, expected_power in rpow_test_values:
with self.subTest(base=base, power=power, expected_power=expected_power):
computed_power = base ** power
self.assertAlmostEqual(expected_power, computed_power)
if '__main__' == __name__:
unittest2.main()
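

# Illustrative summary (not part of the test suite) of the Rational behaviour
# the cases above exercise, e.g.:
#
#   Rational(2, 4) == Rational(1, 2)                    # reduced to lowest terms
#   Rational(1, 2) + Rational(1, 3) == Rational(5, 6)   # addition
#   Rational(1, 2) ** -2 == Rational(4, 1)              # integer powers
#   float(Rational(-1, 2)) == -0.5                      # conversion to float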
| mit | 958,887,407,180,967,000 | 46.123367 | 101 | 0.43951 | false |
WaveBlocks/WaveBlocks | src/WaveBlocks/MatrixPotential1S.py | 1 | 13237 | """The WaveBlocks Project
This file contains code for the representation of potentials for a single component.
These potential are of course scalar ones.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
import sympy
import numpy
from MatrixPotential import MatrixPotential
class MatrixPotential1S(MatrixPotential):
r"""
This class represents a scalar potential :math:`V\left(x\right)`. The potential is given as an
analytical :math:`1 \times 1` matrix expression. Some symbolic calculations with
the potential are supported. For example calculation of eigenvalues and
exponentials and numerical evaluation. Further, there are methods for
splitting the potential into a Taylor expansion and for basis transformations
between canonical and eigenbasis.
"""
def __init__(self, expression, variables):
r"""
Create a new ``MatrixPotential1S`` instance for a given potential matrix :math:`V\left(x\right)`.
:param expression: An expression representing the potential.
"""
#: The variable :math:`x` that represents position space.
self.x = variables[0]
#: The matrix of the potential :math:`V\left(x\right)`.
self.potential = expression
# Unpack single matrix entry
self.potential = self.potential[0,0]
self.exponential = None
self.number_components = 1
# prepare the function in every potential matrix cell for numerical evaluation
self.potential_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.potential, "numpy"))
# Symbolic and numerical eigenvalues and eigenvectors
self.eigenvalues_s = None
self.eigenvalues_n = None
self.eigenvectors_s = None
self.eigenvectors_n = None
self.taylor_eigen_s = None
self.taylor_eigen_n = None
self.remainder_eigen_s = None
self.remainder_eigen_n = None
def __str__(self):
r"""
Put the number of components and the analytical expression (the matrix) into a printable string.
"""
return """Scalar potential given by the expression: V(x) = \n""" + str(self.potential)
def get_number_components(self):
r"""
:return: The number :math:`N` of components the potential supports. In the one dimensional case, it's just 1.
"""
return 1
def evaluate_at(self, nodes, component=0, as_matrix=False):
r"""
Evaluate the potential matrix elementwise at some given grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the potential at.
:param component: The component :math:`V_{i,j}` that gets evaluated or 'None' to evaluate all.
:param as_matrix: Dummy parameter which has no effect here.
:return: A list with the single entry evaluated at the nodes.
"""
return tuple([ numpy.array(self.potential_n(nodes), dtype=numpy.floating) ])
def calculate_eigenvalues(self):
r"""
Calculate the eigenvalue :math:`\lambda_0\left(x\right)` of the potential :math:`V\left(x\right)`.
In the scalar case this is just the matrix entry :math:`V_{0,0}`.
.. note:: This function is idempotent and the eigenvalues are memoized for later reuse.
"""
if self.eigenvalues_s is None:
self.eigenvalues_s = self.potential
self.eigenvalues_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.potential, "numpy"))
def evaluate_eigenvalues_at(self, nodes, component=None, as_matrix=False):
r"""
Evaluate the eigenvalue :math:`\lambda_0\left(x\right)` at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the eigenvalue at.
:param diagonal_component: Dummy parameter that has no effect here.
:param as_matrix: Dummy parameter which has no effect here.
:return: A list with the single eigenvalue evaluated at the nodes.
"""
self.calculate_eigenvalues()
return tuple([ numpy.array(self.eigenvalues_n(nodes)) ])
def calculate_eigenvectors(self):
r"""
        Calculate the eigenvector :math:`\nu_0\left(x\right)` of the potential :math:`V\left(x\right)`.
In the scalar case this is just the value :math:`1`.
.. note:: This function is idempotent and the eigenvectors are memoized for later reuse.
"""
if self.eigenvectors_s is None:
self.eigenvectors_s = sympy.Matrix([[1]])
self.eigenvectors_n = sympy.vectorize(0)(sympy.lambdify(self.x, 1, "numpy"))
def evaluate_eigenvectors_at(self, nodes):
r"""
        Evaluate the eigenvector :math:`\nu_0\left(x\right)` at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the eigenvector at.
:return: A list with the eigenvector evaluated at the given nodes.
"""
self.calculate_eigenvectors()
return tuple([ numpy.ones((1, len(nodes)), dtype=numpy.floating) ])
def project_to_eigen(self, nodes, values, basis=None):
r"""
Project a given vector from the canonical basis to the eigenbasis of the potential.
:param nodes: The grid nodes :math:`\gamma` for the pointwise transformation.
:param values: The list of vectors :math:`\varphi_i` containing the values we want to transform.
        :param basis: A list of basis vectors :math:`\nu_i`. Allows using this function for external data, similar to a static function.
:return: This method does nothing and returns the values.
"""
return [ values[0].copy() ]
def project_to_canonical(self, nodes, values, basis=None):
r"""
Project a given vector from the potential's eigenbasis to the canonical basis.
:param nodes: The grid nodes :math:`\gamma` for the pointwise transformation.
:param values: The list of vectors :math:`\varphi_i` containing the values we want to transform.
        :param basis: A list of basis vectors :math:`\nu_i`. Allows using this function for external data, similar to a static function.
:return: This method does nothing and returns the values.
"""
return [ values[0].copy() ]
def calculate_exponential(self, factor=1):
r"""
Calculate the matrix exponential :math:`E = \exp\left(\alpha M\right)`. In this case
the matrix is of size :math:`1 \times 1` thus the exponential simplifies to the scalar exponential function.
:param factor: A prefactor :math:`\alpha` in the exponential.
.. note:: This function is idempotent.
"""
if self.exponential is None:
self.exponential = sympy.exp(factor*self.potential)
def evaluate_exponential_at(self, nodes):
r"""
Evaluate the exponential of the potential matrix :math:`V` at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the exponential at.
:return: The numerical approximation of the matrix exponential at the given grid nodes.
"""
# Hack for older sympy versions, see recent issue:
# http://www.mail-archive.com/[email protected]/msg05137.html
lookup = {"I" : 1j}
# prepare the function of every potential matrix exponential cell for numerical evaluation
self.expfunctions = sympy.vectorize(0)(sympy.lambdify(self.x, self.exponential, (lookup, "numpy")))
return tuple([ numpy.array(self.expfunctions(nodes)) ])
def calculate_jacobian(self):
r"""
Calculate the jacobian matrix for the component :math:`V_{0,0}` of the potential.
For potentials which depend only one variable :math:`x`, this equals the first derivative.
"""
self.jacobian_s = sympy.diff(self.potential, self.x)
self.jacobian_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.jacobian_s, "numpy"))
def evaluate_jacobian_at(self, nodes, component=None):
r"""
Evaluate the potential's jacobian at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` the jacobian gets evaluated at.
:param component: Dummy parameter that has no effect here.
:return: The value of the potential's jacobian at the given nodes.
"""
return tuple([ self.jacobian_n(nodes) ])
def calculate_hessian(self):
r"""
Calculate the hessian matrix for component :math:`V_{0,0}` of the potential.
For potentials which depend only one variable :math:`x`, this equals the second derivative.
"""
self.hessian_s = sympy.diff(self.potential, self.x, 2)
self.hessian_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.hessian_s, "numpy"))
def evaluate_hessian_at(self, nodes, component=None):
r"""
Evaluate the potential's hessian at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` the hessian gets evaluated at.
:param component: Dummy parameter that has no effect here.
:return: The value of the potential's hessian at the given nodes.
"""
return tuple([ self.hessian_n(nodes) ])
def calculate_local_quadratic(self, diagonal_component=None):
r"""
Calculate the local quadratic approximation :math:`U` of the potential's eigenvalue :math:`\lambda`.
:param diagonal_component: Dummy parameter that has no effect here.
.. note:: This function is idempotent.
"""
# Calculation already done at some earlier time?
if self.taylor_eigen_s is not None:
return
self.calculate_eigenvalues()
self.calculate_jacobian()
self.calculate_hessian()
self.taylor_eigen_s = [ (0, self.eigenvalues_s), (1, self.jacobian_s), (2, self.hessian_s) ]
# Construct function to evaluate the approximation at point q at the given nodes
assert(self.taylor_eigen_n is None)
self.taylor_eigen_n = [
(order, sympy.vectorize(0)(sympy.lambdify([self.x], f, "numpy")))
for order, f in self.taylor_eigen_s
]
def evaluate_local_quadratic_at(self, nodes, diagonal_component=None):
r"""
Numerically evaluate the local quadratic approximation :math:`U` of
the potential's eigenvalue :math:`\lambda` at the given grid nodes :math:`\gamma`.
This function is used for the homogeneous case.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the quadratic approximation at.
:return: An array containing the values of :math:`U` at the nodes :math:`\gamma`.
"""
return tuple([ numpy.array(f(nodes), dtype=numpy.floating) for order, f in self.taylor_eigen_n ])
def calculate_local_remainder(self, diagonal_component=None):
r"""
Calculate the non-quadratic remainder :math:`W` of the quadratic
approximation :math:`U` of the potential's eigenvalue :math:`\lambda`.
This function is used for the homogeneous case and takes into account
the leading component :math:`\chi`.
:param diagonal_component: Dummy parameter that has no effect here.
.. note:: This function is idempotent.
"""
# Calculation already done at some earlier time?
if self.remainder_eigen_s is not None:
return
self.calculate_eigenvalues()
f = self.eigenvalues_s
# point where the taylor series is computed
q = sympy.Symbol("q")
p = f.subs(self.x, q)
j = sympy.diff(f, self.x)
j = j.subs(self.x, q)
h = sympy.diff(f, self.x, 2)
h = h.subs(self.x, q)
quadratic = p + j*(self.x-q) + sympy.Rational(1,2)*h*(self.x-q)**2
# Symbolic expression for the taylor expansion remainder term
self.remainder_eigen_s = self.potential - quadratic
# Construct functions to evaluate the approximation at point q at the given nodes
assert(self.remainder_eigen_n is None)
self.remainder_eigen_n = sympy.vectorize(1)(sympy.lambdify([q, self.x], self.remainder_eigen_s, "numpy"))
def evaluate_local_remainder_at(self, position, nodes, diagonal_component=None, component=None):
r"""
Numerically evaluate the non-quadratic remainder :math:`W` of the quadratic
approximation :math:`U` of the potential's eigenvalue :math:`\lambda` at the given nodes :math:`\gamma`.
This function is used for the homogeneous and the inhomogeneous case and
just evaluates the remainder :math:`W`.
:param position: The point :math:`q` where the Taylor series is computed.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the potential at.
:param component: Dummy parameter that has no effect here.
:return: A list with a single entry consisting of an array containing the values of :math:`W` at the nodes :math:`\gamma`.
"""
return tuple([ numpy.array(self.remainder_eigen_n(position, nodes), dtype=numpy.floating) ])
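

# Illustrative usage sketch (not part of the original module), assuming a
# harmonic-oscillator style potential V(x) = x**2 / 2 given as a 1x1 sympy
# matrix; the variable names below are examples only:
#
#   x = sympy.Symbol("x")
#   V = MatrixPotential1S(sympy.Matrix([[x**2 / 2]]), [x])
#   nodes = numpy.linspace(-1.0, 1.0, 5)
#   values = V.evaluate_at(nodes)[0]
#   V.calculate_local_quadratic()
#   taylor_terms = V.evaluate_local_quadratic_at(nodes)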
| bsd-3-clause | -6,184,886,920,704,668,000 | 40.495298 | 136 | 0.652187 | false |
naturali/tensorflow | tensorflow/python/kernel_tests/segment_reduction_ops_test.py | 1 | 23515 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.util.all_util import reveal_undocumented
class SegmentReductionHelper(tf.test.TestCase):
def _input(self, input_shape, dtype=tf.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
return tf.constant(values, shape=input_shape,
dtype=dtype), np_values
def _segmentReduce(self, indices, x, op1, op2=None, num_out_rows=None):
if not x.size: return np.array([])
indices = np.asarray(indices)
if num_out_rows is None:
num_out_rows = indices[-1] + 1
output = [None] * num_out_rows
slice_shape = x.shape[indices.ndim:]
x_flat = x.reshape((indices.size,) + slice_shape)
for i, index in enumerate(indices.ravel()):
if output[index] is not None:
output[index] = op1(output[index], x_flat[i])
else:
output[index] = x_flat[i]
    # Zero-initialize values that are still uncalculated.
output = [o if o is not None else np.zeros(slice_shape) for o in output]
if op2 is not None:
output = [op2(o) for o in output]
output = [o.reshape(slice_shape) for o in output]
return np.array(output)
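  # Worked example (illustrative): for x = [[1], [2], [3], [4]] and
  # indices = [0, 0, 1, 1], a sum reduction (op1=np.add) yields [[3], [7]];
  # rows sharing a segment id are combined with op1.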
def _assertAllClose(self, indices, np_x, tf_x):
for i in set(np.asarray(indices).ravel()):
self.assertAllClose(np_x[i], tf_x[i])
def _mean_cum_op(self, x, y):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
return x[0] / x[1] if isinstance(x, tuple) else x
class SegmentReductionOpTest(SegmentReductionHelper):
def testValues(self):
dtypes = [tf.float32,
tf.float64,
tf.int64,
tf.int32,
tf.complex64,
tf.complex128]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, tf.segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
tf.segment_mean),
(np.ndarray.__mul__, None, tf.segment_prod),
(np.minimum, None, tf.segment_min),
(np.maximum, None, tf.segment_max)]
# A subset of ops has been enabled for complex numbers
complex_ops_list = [(np.add, None, tf.segment_sum),
(np.ndarray.__mul__, None, tf.segment_prod)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
if dtype in (tf.complex64, tf.complex128):
curr_ops_list = complex_ops_list
else:
curr_ops_list = ops_list
with self.test_session(use_gpu=False):
tf_x, np_x = self._input(shape, dtype=dtype)
for np_op1, np_op2, tf_op in curr_ops_list:
np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
s = tf_op(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self._assertAllClose(indices, np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
          # `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
indices = tf.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
tf.segment_sum(data=tf_x, segment_ids=indices)
def testSegmentIdsSize(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1]
s = tf.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
s.eval()
def testSegmentIdsValid(self):
# This is a baseline for the following SegmentIdsInvalid* tests.
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 0, 1]
result = tf.segment_sum(data=tf_x, segment_ids=indices).eval()
self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
def testSegmentIdsInvalid1(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [-1, -1, 0, 0]
s = tf.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids do not start at 0"):
s.eval()
def testSegmentIdsInvalid2(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [1, 1, 2, 2]
s = tf.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids do not start at 0"):
s.eval()
def testSegmentIdsInvalid3(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 2, 2]
s = tf.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing by 1"):
s.eval()
def testSegmentIdsInvalid4(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 0, 1]
s = tf.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing by 1"):
s.eval()
def testSegmentIdsInvalid5(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 2, 0]
s = tf.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), probably "
"because 'segment_ids' input is not sorted."):
s.eval()
def testSegmentIdsInvalid6(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 0, -1]
s = tf.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testSegmentIdsInvalid7(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 0, -2]
s = tf.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
for tf_op in [tf.segment_sum,
tf.segment_mean,
tf.segment_min,
tf.segment_max]:
with self.test_session():
tf_x, np_x = self._input(shape, dtype=tf.float64)
s = tf_op(data=tf_x, segment_ids=indices)
jacob_t, jacob_n = tf.test.compute_gradient(
tf_x,
shape,
s,
[3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
class UnsortedSegmentSumTest(SegmentReductionHelper):
use_gpu = False
def testValues(self):
dtypes = [tf.float32,
tf.float64,
tf.int64,
tf.int32,
tf.complex64,
tf.complex128]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
with self.test_session(use_gpu=self.use_gpu):
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(indices,
np_x,
np.add,
op2=None,
num_out_rows=num_segments)
s = tf.unsorted_segment_sum(data=tf_x,
segment_ids=indices,
num_segments=num_segments)
tf_ans = s.eval()
self._assertAllClose(indices, np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testGradient(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
with self.test_session(use_gpu=self.use_gpu):
tf_x, np_x = self._input(shape, dtype=tf.float64)
s = tf.unsorted_segment_sum(data=tf_x,
segment_ids=indices,
num_segments=num_segments)
jacob_t, jacob_n = tf.test.compute_gradient(
tf_x,
shape,
s,
[num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testGradientMatchesSegmentSum(self):
# Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
# and compare the outputs, which should be identical.
# NB: for this test to work, indices must be valid for SegmentSum, namely
# it must be sorted, the indices must be contiguous, and num_segments
# must be max(indices) + 1.
indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n = len(indices)
num_cols = 2
shape = [n, num_cols]
num_segments = max(indices) + 1
with self.test_session(use_gpu=self.use_gpu):
tf_x, np_x = self._input(shape, dtype=tf.float64)
# Results from UnsortedSegmentSum
unsorted_s = tf.unsorted_segment_sum(data=tf_x,
segment_ids=indices,
num_segments=num_segments)
(unsorted_jacob_t, unsorted_jacob_n) = tf.test.compute_gradient(
tf_x,
shape,
unsorted_s,
[num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
# Results from SegmentSum
sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices)
sorted_jacob_t, sorted_jacob_n = tf.test.compute_gradient(
tf_x,
shape,
sorted_s,
[num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3)
def testBadIndices(self):
# Note: GPU kernel does not return the out-of-range error needed for this
# test, so this test is marked as cpu-only.
with self.test_session(use_gpu=False):
for bad in [[-1]], [[7]]:
unsorted = tf.unsorted_segment_sum([[17]], bad, num_segments=2)
with self.assertRaisesOpError(
r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
unsorted.eval()
def testEmptySecondDimension(self):
dtypes = [np.float32,
np.float64,
np.int64,
np.int32,
np.complex64,
np.complex128]
with self.test_session(use_gpu=self.use_gpu):
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
unsorted = tf.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
class UnsortedSegmentSumGpuTest(UnsortedSegmentSumTest):
use_gpu = True
class SparseSegmentReductionHelper(SegmentReductionHelper):
def _sparse_input(self, input_shape, num_indices,
dtype=tf.int32):
a, b = super(SparseSegmentReductionHelper, self)._input(input_shape,
dtype)
indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
return (tf.constant(indices, dtype=tf.int32),
indices, a, b)
def _sparseSegmentReduce(self, x, indices, segment_indices, op1, op2=None):
return self._segmentReduce(segment_indices, x[indices], op1, op2)
class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
def setUp(self):
reveal_undocumented("tensorflow.python."
"sparse_segment_mean_grad", tf)
reveal_undocumented("tensorflow.python."
"sparse_segment_sqrt_n_grad", tf)
def testValues(self):
dtypes = [tf.float32,
tf.float64,
tf.int64,
tf.int32]
mean_dtypes = [tf.float32,
tf.float64]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, tf.sparse_segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
tf.sparse_segment_mean)]
n = 400
shape = [n, 2]
segment_indices = []
for i in range(20):
for _ in range(i + 1):
segment_indices.append(i)
num_indices = len(segment_indices)
for dtype in dtypes:
with self.test_session(use_gpu=False):
tf_indices, np_indices, tf_x, np_x = self._sparse_input(shape,
num_indices,
dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
if tf_op == tf.sparse_segment_mean and dtype not in mean_dtypes:
continue
np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self._assertAllClose(segment_indices, np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
# `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testValid(self):
# Baseline for the test*Invalid* methods below.
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
s.eval()
def testIndiciesInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, -1, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[1\] == -1 out of range \[0, 10\)"):
s.eval()
def testIndiciesInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[3\] == 10 out of range \[0, 10\)"):
s.eval()
def testSegmentsInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids are not increasing by 1"):
s.eval()
def testSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [0, 1, 0, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids are not increasing by 1"):
s.eval()
def testSegmentsInvalid3(self):
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), probably because "
"'segment_ids' input is not sorted"):
s.eval()
def testSegmentsInvalid4(self):
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids do not start at 0"):
s.eval()
def testSegmentsInvalid5(self):
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [1, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids do not start at 0"):
s.eval()
def testSegmentsInvalid6(self):
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [0, 0, 0, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testSegmentsInvalid7(self):
tf_x, _ = self._input([10, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
segment_indices = [0, 0, 0, -2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testGradient(self):
shape = [10, 4]
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [tf.sparse_segment_sum, tf.sparse_segment_mean]:
with self.test_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=tf.float64)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
jacob_t, jacob_n = tf.test.compute_gradient(
tf_x,
shape,
s,
[3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testGradientValid(self):
# Baseline for the testGradient*Invalid* methods below.
tf_x, _ = self._input([3, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
s.eval()
def testGradientIndicesInvalid1(self):
tf_x, _ = self._input([3, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index 10 out of range \[0, 10\)"):
s.eval()
def testGradientIndicesInvalid2(self):
tf_x, _ = self._input([3, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, -1, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index -1 out of range \[0, 10\)"):
s.eval()
def testGradientSegmentsInvalid1(self):
tf_x, _ = self._input([3, 4], dtype=tf.float32) # expecting 3 segments
ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
segment_indices = [0, 1, 1, 1] # 2 segments
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError("Invalid number of segments"):
s.eval()
def testGradientSegmentsInvalid2(self):
tf_x, _ = self._input([1, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 1 out of range \[0, 1\)"):
s.eval()
def testGradientSegmentsInvalid3(self):
tf_x, _ = self._input([2, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id -1 out of range \[0, 2\)"):
s.eval()
def testGradientSegmentsInvalid4(self):
tf_x, _ = self._input([0, 4], dtype=tf.float32)
ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
segment_indices = [0, 1, 2, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 0 out of range \[0, 0\)"):
s.eval()
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -4,520,860,088,002,389,000 | 37.423203 | 80 | 0.587242 | false |
ampproject/amp-github-apps | project-metrics/metrics_service/scrapers/commit_scraper.py | 1 | 5432 | import datetime
from typing import Sequence
import logging
import time
import sqlalchemy
from apis import github
from database import db
from database import models
SCRAPE_INTERVAL_SECONDS = 5
def timestamp_90_days_ago() -> datetime.datetime:
return datetime.datetime.now() - datetime.timedelta(days=90)
class CommitScraper(object):
def __init__(self):
self.github = github.GitHubGraphQL()
self.session = db.Session()
self.cursor = None
def __del__(self):
self.session.close()
def _get_latest_commit_timestamp(self) -> datetime.datetime:
commit = self.session.query(models.Commit).order_by(
models.Commit.committed_at.desc()).first()
return commit.committed_at if commit else timestamp_90_days_ago()
def _get_oldest_commit_timestamp(self) -> datetime.datetime:
commit = self.session.query(models.Commit).order_by(
models.Commit.committed_at.asc()).first()
return commit.committed_at if commit else datetime.datetime.now()
def scrape_page(self,
since: str,
until: str = None,
after: str = None) -> Sequence[models.Commit]:
"""Fetch a page of commits from the repository.
Updates the cursor with the `after` field from the paging info.
Args:
since: timestamp to start scraping at.
until: timestamp to end scraping at.
after: end cursor returned by GraphQL paging info
Returns:
The list of returned commits.
"""
history_args = 'since: "%s"' % github.Timestamp(since).git_timestamp
if after:
history_args += ', after: "%s"' % after
if until:
history_args += ', until: "%s"' % github.Timestamp(until).git_timestamp
logging.info('Querying GitHub for commits with args: %s', history_args)
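# Page through the branch's commit history, pulling each commit's first associated pull request.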
response = self.github.query_main_branch("""target {{ ... on Commit {{
history(first: {page_size}, {history_args}) {{
pageInfo {{ endCursor }}
nodes {{
oid
committedDate
associatedPullRequests(first: 1) {{
nodes {{
number
}}
}}
}}
}}
}} }}""".format(page_size=github.MAX_PAGE_SIZE, history_args=history_args))
commit_history = response['target']['history']
self.cursor = commit_history['pageInfo']['endCursor']
if self.cursor is None:
raise IndexError('No further commits available from GitHub')
for commit in commit_history['nodes']:
try:
pull_request = commit['associatedPullRequests']['nodes'][0]
pull_request_status = 'UNKNOWN'
# TODO(rcebulko): Scrape CheckSuite runs and set the status
yield models.Commit(
hash=commit['oid'],
committed_at=github.Timestamp(commit['committedDate']).datetime,
pull_request=pull_request['number'],
pull_request_status=models.PullRequestStatus.UNKNOWN)
except IndexError:
logging.warn('No pull request found for commit %s', commit['oid'][:7])
def scrape_since_latest(self):
"""Scrapes latest commits from GitHub and saves them to the DB.
When the database is empty, it will scrape all commits from the last 90
days. Otherwise, it will scrape commits since the latest commit currently in
the DB.
"""
self.cursor = None
latest_timestamp = self._get_latest_commit_timestamp()
page_count = 1
try:
while True:
logging.info('Fetching page %d of commits from GitHub', page_count)
commits = self.scrape_page(since=latest_timestamp, after=self.cursor)
commit_dicts = [{
'hash': commit.hash,
'committed_at': commit.committed_at,
'pull_request': commit.pull_request,
'pull_request_status': commit.pull_request_status,
} for commit in commits]
logging.info('Scraped %d commits', len(commit_dicts))
db.get_engine().execute(
models.Commit.__table__.insert().prefix_with('IGNORE'),
commit_dicts)
page_count += 1
time.sleep(SCRAPE_INTERVAL_SECONDS)
except IndexError:
logging.info('Completed scraping %d pages of commits', page_count)
def scrape_historical(self, since: datetime.datetime):
"""Scrapes historical commits going back as far as is specified.
Args:
since: datetime to scrape backwards in commit history until
"""
self.cursor = None
oldest_timestamp = self._get_oldest_commit_timestamp()
page_count = 1
try:
while True:
logging.info('Fetching page %d of historical commits from GitHub',
page_count)
commits = self.scrape_page(
since=since, until=oldest_timestamp, after=self.cursor)
commit_dicts = [{
'hash': commit.hash,
'committed_at': commit.committed_at,
'pull_request': commit.pull_request,
'pull_request_status': commit.pull_request_status,
} for commit in commits]
logging.info('Scraped %d commits', len(commit_dicts))
db.get_engine().execute(
models.Commit.__table__.insert().prefix_with('IGNORE'),
commit_dicts)
page_count += 1
time.sleep(SCRAPE_INTERVAL_SECONDS)
except IndexError:
logging.info('Completed scraping %d pages of historical commits',
page_count)
@classmethod
def scrape(cls):
cls().scrape_since_latest()
| apache-2.0 | 4,349,031,889,459,565,600 | 32.121951 | 80 | 0.628866 | false |
tobijk/ecromedos | lib/net/ecromedos/ecmlprocessor.py | 1 | 4602 | # -*- coding: utf-8 -*-
#
# Desc: This file is part of the ecromedos Document Preparation System
# Author: Tobias Koch <[email protected]>
# License: MIT
# URL: http://www.ecromedos.net
#
import os, sys
import lxml.etree as etree
from net.ecromedos.error import ECMDSError, ECMDSPluginError
from net.ecromedos.configreader import ECMDSConfigReader
from net.ecromedos.dtdresolver import ECMDSDTDResolver
from net.ecromedos.preprocessor import ECMDSPreprocessor
class ECMLProcessor(ECMDSConfigReader, ECMDSDTDResolver, ECMDSPreprocessor):
def __init__(self, options={}):
ECMDSConfigReader.__init__(self)
ECMDSDTDResolver.__init__(self)
ECMDSPreprocessor.__init__(self)
self.readConfig(options)
self.loadPlugins()
self.loadStylesheet()
#end function
def loadXMLDocument(self, filename):
"""Try to load XML document from @filename."""
try:
# create parser
parser = etree.XMLParser(
load_dtd=True,
no_network=True,
strip_cdata=True,
remove_comments=True,
resolve_entities=True
)
# register custom resolver
parser.resolvers.add(self)
# parse the document
tree = etree.parse(filename, parser=parser)
except Exception as e:
raise ECMDSError(str(e))
# return document tree
return tree
#end function
def loadStylesheet(self):
"""Load matching stylesheet for desired output format."""
target_format = self.config['target_format']
try:
style_dir = self.config['style_dir']
except KeyError:
msg = "Please specify the location of the stylesheets."
raise ECMDSError(msg)
#end try
filename = os.path.join(style_dir, target_format, "ecmds.xsl")
try:
tree = self.loadXMLDocument(filename)
except ECMDSError as e:
msg = "Could not load stylesheet:\n %s" % (e.msg(),)
raise ECMDSError(msg)
#end try
try:
self.stylesheet = etree.XSLT(tree)
except Exception as e:
raise ECMDSError(str(e))
#end if
return self.stylesheet
#end function
def validateDocument(self, document):
"""Validate the given document."""
try:
style_dir = self.config['style_dir']
except KeyError:
msg = "Please specify the location of the stylesheets."
raise ECMDSError(msg)
#end try
# load the DTD
dtd_filename = os.path.join(style_dir, "DTD", "ecromedos.dtd")
dtd = etree.DTD(dtd_filename)
# validate the document
result = dtd.validate(document)
if result == False:
raise ECMDSError(dtd.error_log.last_error)
return result
#end function
def applyStylesheet(self, document):
"""Apply stylesheet to document."""
params = None
try:
params = self.config['xsl_params']
except KeyError: pass
try:
result = self.stylesheet(document, **params)
except Exception as e:
msg = "Error transforming document:\n %s." % (str(e),)
raise ECMDSError(msg)
#end try
return result
#end function
def process(self, filename, verbose=True):
"""Convert the document stored under filename."""
def message(msg, verbose):
if not verbose: return
sys.stdout.write(" * " + msg)
sys.stdout.write(" " * (40 - len(msg)))
sys.stdout.flush()
#end inline function
def status(status, verbose):
if not verbose: return
sys.stdout.write(status + "\n")
#end inline function
# load document
message("Reading document...", verbose)
document = self.loadXMLDocument(filename)
status("DONE", verbose)
# validate document
if self.config['do_validate']:
message("Validating document...", verbose)
self.validateDocument(document)
status("VALID", verbose)
#end if
# prepare document
message("Pre-processing document tree...", verbose)
self.prepareDocument(document)
status("DONE", verbose)
# apply stylesheet
message("Transforming document...", verbose)
self.applyStylesheet(document)
status("DONE", verbose)
#end function
#end class
| mit | 7,828,527,572,711,646,000 | 27.407407 | 76 | 0.58279 | false |
danielquinn/spirithunter | src/spirits/api/resources.py | 1 | 8242 | import json
import random
from math import sin, cos
from django.conf import settings
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from tastypie import fields
from tastypie import http
from tastypie.authentication import MultiAuthentication, Authentication, BasicAuthentication, SessionAuthentication
from tastypie.resources import ModelResource, convert_post_to_patch
from tastypie.exceptions import BadRequest
from aspects.models import Element, Facet
from geography.models import Country
from spirithunter import logger
from .authorization import SpiritAuthorization
from ..forms import PatchForm
from ..models.spirit import ElementalStrength, Spirit
class ImageMixin(object):
def dehydrate(self, bundle):
bundle.data.update({
"images": {}
})
for size in self.AVAILABLE_IMAGE_SIZES:
bundle.data["images"][str(size)] = getattr(
bundle.obj,
'image{size}'.format(size=size)
)
return bundle
class ElementResource(ImageMixin, ModelResource):
AVAILABLE_IMAGE_SIZES = (16, 32)
class Meta:
queryset = Element.objects.all()
include_resource_uri = False
resource_name = "elements"
class ElementalStrengthResource(ModelResource):
AVAILABLE_IMAGE_SIZES = (16, 32)
element = fields.ToOneField(ElementResource, "element", full=True)
class Meta:
queryset = ElementalStrength.objects.all()
include_resource_uri = False
resource_name = "elements"
class FacetResource(ImageMixin, ModelResource):
AVAILABLE_IMAGE_SIZES = (16, 32)
class Meta:
queryset = Facet.objects.all()
include_resource_uri = False
resource_name = "facets"
class NationalityResource(ModelResource):
class Meta:
queryset = Country.objects.all()
include_resource_uri = False
resource_name = "nationalities"
def dehydrate(self, bundle):
return {
"code": bundle.obj.country.code,
"name": bundle.obj.country.name,
}
class SpiritResource(ImageMixin, ModelResource):
AVAILABLE_IMAGE_SIZES = (16, 32, 64, 128, 256)
SPIRITS_TO_GENERATE = 5
SPAWN_RADIUS = 50
owner = fields.ToOneField("users.api.UserResource", "owner", null=True)
elementals = fields.ManyToManyField(
ElementalStrengthResource,
"elemental_strengths",
full=True
)
facets = fields.ManyToManyField(
FacetResource,
"facets",
full=True
)
nationalities = fields.ManyToManyField(
NationalityResource,
"nationalities",
full=True
)
class Meta:
allowed_methods = ("get", "patch",)
authentication = MultiAuthentication(
SessionAuthentication(),
BasicAuthentication(),
Authentication()
)
authorization = SpiritAuthorization()
object_class = Spirit
queryset = Spirit.objects.all()
resource_name = "spirits"
filtering = {
"id": ("exact",),
"owner": ("exact",),
"activity": ("exact",),
}
def dehydrate(self, bundle):
bundle = ModelResource.dehydrate(self, bundle)
bundle = ImageMixin.dehydrate(self, bundle)
if bundle.obj.activity == Spirit.ACTIVITY_WANDER:
if bundle.obj.health_current == 0:
bundle.data["experience_given"] = bundle.obj.get_ladder().xp_given
return bundle
@staticmethod
def dehydrate_origin(bundle):
if bundle.obj.origin:
r = json.loads(bundle.obj.origin.geojson)
r["coordinates"][0] = round(r["coordinates"][0], settings.COORDINATES_ROUNDING)
r["coordinates"][1] = round(r["coordinates"][1], settings.COORDINATES_ROUNDING)
return r
return None
@staticmethod
def dehydrate_location(bundle):
if bundle.obj.location:
r = json.loads(bundle.obj.location.geojson)
r["coordinates"][0] = round(r["coordinates"][0], settings.COORDINATES_ROUNDING)
r["coordinates"][1] = round(r["coordinates"][1], settings.COORDINATES_ROUNDING)
return r
return None
@staticmethod
def dehydrate_activity(bundle):
return {
"id": bundle.obj.activity,
"name": bundle.obj.get_activity_display()
}
def obj_get_list(self, bundle, **kwargs):
if bundle.request.GET.get("finder"):
if not bundle.request.location:
raise BadRequest(
"Finder cannot be invoked without a location header"
)
if not bundle.request.user.is_authenticated():
raise BadRequest(
"Finder is only available to authenticated users"
)
try:
return self._finder(bundle.request)
except ValidationError as e:
raise BadRequest(e.messages[0])
else:
return ModelResource.obj_get_list(self, bundle, **kwargs)
def patch_list(self, request, **kwargs):
return http.HttpNotImplemented()
def patch_detail(self, request, **kwargs):
pk = kwargs.get("pk")
request = convert_post_to_patch(request)
self.authorized_update_detail(
Spirit.objects.filter(pk=pk),
self.build_bundle(request=request)
)
form = PatchForm(
request,
get_object_or_404(Spirit, pk=pk),
self.deserialize(
request,
request.body,
format=request.META.get("CONTENT_TYPE", "application/json")
)
)
if form.is_valid():
form.save()
return self.create_response(request, "", status=202)
raise BadRequest(form.errors.as_text())
def _finder(self, request):
"""
Open the app and show me what's here. If there's nothing here (common)
make some spirits relevant to the environment to play with.
"""
lat, lng = (request.location.y, request.location.x)
if lat > 80 or lat < -80:
raise ValidationError("Invalid lat value: %s" % lat)
if lng > 180 or lng < -180:
raise ValidationError("Invalid lng value: %s" % lng)
level_low, level_high = 1, 1
if request.user.is_authenticated():
spirit_levels = sorted(
request.user.spirits.filter(
activity=Spirit.ACTIVITY_JARRED
).values_list(
"level",
flat=True
)
)
if spirit_levels:
level_low, level_high = spirit_levels[0], spirit_levels[-1]
spirits = list(Spirit.objects.filter(
activity=Spirit.ACTIVITY_WANDER,
health_current__gt=0,
location__distance_lte=(request.location, self.SPAWN_RADIUS)
))
while len(spirits) < self.SPIRITS_TO_GENERATE:
# Magic
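# Offset the spawn point from the player's location by a random distance (up to
# SPAWN_RADIUS) and direction, converting metres to degrees via settings.M_LNG/M_LAT.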
centre_x = float(lat)
centre_y = float(lng)
r = random.uniform(0, self.SPAWN_RADIUS)
a = random.uniform(0, 360)
target_x = centre_x + ((r * cos(a)) / settings.M_LNG)
target_y = centre_y + ((r * sin(a)) / settings.M_LAT)
# /Magic
logger.debug("Creating a spirit at {lat},{lng}".format(
lat=target_x,
lng=target_y
))
spirit = Spirit.objects.create_for_environment(
centre=(centre_x, centre_y),
target=(target_x, target_y),
level_low=level_low,
level_high=level_high
)
spirits.append(spirit)
# Feel lucky?
if random.randint(1, 10) == 5:
# Start encounter immediately
pass
return SpiritResource.get_object_list(self, request).filter(
activity=Spirit.ACTIVITY_WANDER,
health_current__gt=0,
location__distance_lte=(request.location, 5000)
)
| agpl-3.0 | -7,481,127,565,425,262,000 | 26.565217 | 115 | 0.579592 | false |
linuxrocks123/MailTask | mt_attache.py | 1 | 3151 | #! /usr/bin/env python
# MailTask Alpha: The Email Manager
# Copyright (C) 2015 Patrick Simmons
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import codecs
import fltk
from html2text import html2text
import os
import tempfile
#Note: EVERY method here must correctly handle unicode by decoding it with utf-8/replace,
#then ENCODING it with utf-8
#Note: FLTK 1.1 seems to use ISO-8859-1 as its native encoding.
# FLTK 1.3 changes this to UTF-8.
#FLTK_ENCODING="ISO-8859-1"
FLTK_ENCODING="UTF-8"
def text_plain(submsg,mime_encoding):
return submsg.get_payload(decode=True).decode(encoding=mime_encoding,errors="replace").encode(encoding=FLTK_ENCODING,errors="replace")
def text_html(submsg,mime_encoding):
return html2text(submsg.get_payload(decode=True).decode(encoding=mime_encoding,errors="replace")).encode(encoding=FLTK_ENCODING,errors="replace")
def application_pdf(submsg,mime_encoding):
temptuple=tempfile.mkstemp()
os.fdopen(temptuple[0],'w').write(submsg.get_payload(decode=True))
os.system("xpdf "+temptuple[1]+" & ( sleep 10; rm "+temptuple[1]+" ) &")
return "PDF file opened"
def application_octetstream(submsg,mime_encoding):
fc = fltk.Fl_File_Chooser(".","*",fltk.Fl_File_Chooser.CREATE,"Select Save Location")
fc.show()
while fc.shown():
fltk.Fl_wait()
if fc.value()==None:
return submsg.get_payload(decode=True).decode(encoding=mime_encoding,errors="replace").encode(encoding=FLTK_ENCODING,errors="replace")
open(fc.value(),'w').write(submsg.get_payload(decode=True))
return "Undisplayable file; saved to "+fc.value()
def display_submessage(submsg):
if submsg['Content-Transfer-Encoding']==None:
del submsg['Content-Transfer-Encoding']
if submsg.get_payload(decode=True)==None:
return ""
ATTACHE = { "text/plain" : text_plain, "text/html" : text_html,
"application/pdf" : application_pdf }
mime_encoding = submsg.get_content_charset()
if mime_encoding==None:
mime_encoding="utf-8"
else:
try:
codecs.lookup(mime_encoding)
valid_encoding = True
except LookupError:
valid_encoding = False
if not valid_encoding:
mime_encoding="utf-8"
mimetype = submsg.get_content_type()
print mimetype
if mimetype in ATTACHE:
return ATTACHE[mimetype](submsg,mime_encoding)
elif mimetype.find("text/")==0:
return text_plain(submsg,mime_encoding)
return application_octetstream(submsg,mime_encoding)
| gpl-3.0 | -9,170,419,339,616,157,000 | 35.218391 | 149 | 0.699778 | false |
unapiedra/BBChop | tests/dumbdag.py | 1 | 3423 | # Copyright 2008 Ealdwulf Wuffinga
# This file is part of BBChop.
#
# BBChop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# BBChop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BBChop. If not, see <http://www.gnu.org/licenses/>.
from . import dagAlg
from BBChop.listUtils import listSub,prod
# class for computing over directed acyclic graphs.
# values are held outside the graph object, in lists
# the dag is defined by a parents relation: for each index, which indexes are its parents.
# it is required that < and > on indexes are consistent with the transitive closure of the parents
# relation. That is, if parent*(a,b) then a<b and b>a. This is checked.
# this version of the class has a simple O(N^2) implementation for test purposes
class IllFormedDAGFile(Exception): pass
class DAGWrongLength(Exception): pass
# abstract dag class: defines sum,and type functions in terms of comb functions
class absDag:
def sumUpto(self,values):
return self.combUpto(values,sum)
def sumAfter(self,values):
return self.combAfter(values,sum)
def anyUpto(self,values):
return self.combUpto(values,any)
def anyAfter(self,values):
return self.combAfter(values,any)
def prodAfter(self,values):
return self.combAfter(values,prod)
class dag(absDag):
def __init__(self,parents,N):
self.parents=parents
children=[[] for i in range(N)]
for i in range(N):
for p in parents[i]:
children[p].append(i)
self.children=children
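# Precompute the descendant relation (transitive closure of the child relation)
# so the comb* methods can use simple membership tests.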
childRel=dagAlg.childLists2Rel(self.children)
self.decendentRel=dagAlg.transitiveClosure(childRel,N)
# these methods assume the consistentency defined above.
# for each location, return the sum of lower locations from values
def combUpto(self,values,comb):
res=[comb([v for (i,v) in enumerate(values) if (i,j) in self.decendentRel]) for j in range(len(values))]
return res
# for each location, return the sum of higher locations from values
def combAfter(self,values,comb):
res=[comb([v for (i,v) in enumerate(values) if (j,i) in self.decendentRel]) for j in range(len(values))]
return res
# for each location, return the sum of locations neither lower or higher from values
# we do this by taking the total and subtracting everything else.
def sumOther(self,values,sumUpto=None,sumAfter=None):
# save recalculating sumUpto/After if already known
if sumUpto is None:
sumUpto=self.sumUpto(values)
if sumAfter is None:
sumAfter=self.sumAfter(values)
sums=[sum(values)]*len(values)
#
sums=listSub(sums,values,sumUpto,sumAfter)
return sums
def linearTestDag(N):
parents=['%d %d' %(a+1,a) for a in range(N-1)]
parents[:0]='0'
return dag(parents,N)
| gpl-2.0 | -1,333,789,492,420,188,700 | 29.5625 | 112 | 0.674847 | false |
jleete97/python-graphics | games/turns/reversi/reversi.py | 1 | 3731 | import random
import sys
import time
from reversiboard import *
from games.turns.reversi.reversimoves import *
# Window parameters
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 700
# Colors
DARK_GREEN = (0, 128, 0)
DARK_GREY = (128, 128, 128)
LIGHT_RED = (255, 192, 192)
GREEN = (0, 255, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# Board size (number of squares on each side)
BOARD_SIZE = 8
HUMAN = 'human'
COMPUTER = 'computer'
# Players: human plays white, computer plays black
# Pick random starting player
sides = [ HUMAN, COMPUTER ]
colors = { HUMAN : WHITE , COMPUTER : BLACK }
pygame.init()
surface = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT), 0, 32)
another_game = True
while another_game:
playerIndex = random.randrange(2)
board = ReversiBoard(BOARD_SIZE, sides)
drawer = ReversiBoardDrawer(board,
surface,
WINDOW_WIDTH,
WINDOW_HEIGHT,
DARK_GREY,
DARK_GREEN,
GREEN,
sides, colors)
try:
playing = True
missedMoves = 0
winner = None
while playing:
opponentIndex = 1 - playerIndex
player = sides[playerIndex]
opponent = sides[opponentIndex]
drawer.drawBoard()
moveResult = []
if board.noLegalMoves(player, opponent):
print(player + " has no legal move.")
move = None
time.sleep(3)
else:
print(player + " is moving...")
if player == HUMAN:
while moveResult == []:
move = getPlayerMove(drawer)
moveResult = board.resultOfMove(move, player, opponent)
else:
move = getComputerMove(board, COMPUTER, HUMAN)
moveResult = board.resultOfMove(move, player, opponent)
print(" move result: " + str(moveResult))
displayMove = None
if (move is not None):
displayMove = (move[0] + 1, move[1] + 1);
print(player + " has moved: " + str(displayMove))
if move is None:
missedMoves += 1
else:
missedMoves = 0
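# Two consecutive passes means neither player can move, so the game is over.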
if missedMoves == 2:
winner = board.determineWinner()
playing = False
else:
board.apply(move, moveResult, player)
drawer.drawMove(move, player)
if board.isFull():
winner = board.determineWinner()
playing = False
playerIndex = 1 - playerIndex
except PlayerQuitException:
pass
if winner is None:
outcome = "The game is a tie."
else:
outcome = "The " + winner + " wins!"
fontObj = pygame.font.Font('freesansbold.ttf', 32)
textSurface = fontObj.render(outcome, True, LIGHT_RED, DARK_GREY)
textRect = textSurface.get_rect()
textRect.center = (WINDOW_WIDTH // 2, WINDOW_HEIGHT // 2)
surface.blit(textSurface, textRect)
pygame.display.update()
asking_about_another_game = True
while asking_about_another_game:
for event in pygame.event.get():
if event.type == QUIT:
another_game = False
asking_about_another_game = False
break
elif event.type == KEYUP and event.key in [K_ESCAPE, ord('r')]:
asking_about_another_game = False
break
pygame.display.update()
pygame.quit()
sys.exit()
| mit | -7,343,959,332,232,993,000 | 26.233577 | 79 | 0.5197 | false |
Lothiraldan/MongoTSDB | tests/test_range.py | 1 | 3744 | import unittest
from mongotsdb import Range, SubRange, RangeSet, MultiRangeWorker, RangeWorker
class RangeTestCase(unittest.TestCase):
def setUp(self):
self.start = 0
self.stop = 7
self.step = self.stop - self.start
self.r = Range(self.start, self.stop)
def test_instantiation(self):
self.assertTrue(self.r.is_empty())
self.assertFalse(self.r.is_partial())
self.assertFalse(self.r.is_full())
def test_sub_range_beggining(self):
value = 42
sub_range = SubRange(0, 3, value)
self.r.add_sub_range(sub_range)
self.assertFalse(self.r.is_empty())
self.assertTrue(self.r.is_partial())
self.assertFalse(self.r.is_full())
expected_subrange = SubRange(4, 7)
self.assertEqual(self.r.get_missing_ranges(), [expected_subrange])
def test_sub_range_end(self):
value = 42
sub_range = SubRange(4, 7, value)
self.r.add_sub_range(sub_range)
self.assertFalse(self.r.is_empty())
self.assertTrue(self.r.is_partial())
self.assertFalse(self.r.is_full())
expected_subrange = SubRange(0, 3)
self.assertEqual(self.r.get_missing_ranges(), [expected_subrange])
def test_sub_range_middle(self):
value = 42
sub_range = SubRange(2, 6, value)
self.r.add_sub_range(sub_range)
self.assertFalse(self.r.is_empty())
self.assertTrue(self.r.is_partial())
self.assertFalse(self.r.is_full())
expected_subrange_1 = SubRange(0, 1)
expected_subrange_2 = SubRange(7, 7)
self.assertEqual(self.r.get_missing_ranges(), [expected_subrange_1,
expected_subrange_2])
def test_sub_range_full(self):
value = 42
sub_range = SubRange(0, 7, value)
self.r.add_sub_range(sub_range)
self.assertFalse(self.r.is_empty())
self.assertFalse(self.r.is_partial())
self.assertTrue(self.r.is_full())
self.assertEqual(self.r.get_missing_ranges(), [])
def test_sub_range_full_with_2_subranges(self):
value = 42
sub_range1 = SubRange(0, 3, value)
value = 42
sub_range2 = SubRange(4, 7, value)
self.r.add_sub_range(sub_range1)
self.r.add_sub_range(sub_range2)
self.assertFalse(self.r.is_empty())
self.assertFalse(self.r.is_partial())
self.assertTrue(self.r.is_full())
self.assertEqual(self.r.get_missing_ranges(), [])
class RangeSetTestCase(unittest.TestCase):
def setUp(self):
self.start = 0
self.stop = 49
self.step = 10
self.range_set = RangeSet(self.start, self.stop, self.step)
def test_simple(self):
workers = list(self.range_set.generate_workers())
expected_workers = [MultiRangeWorker(0, 49, 10)]
self.assertEqual(workers, expected_workers)
def test_add_subrange(self):
sub_range = SubRange(22, 25, value=42)
self.range_set.add_sub_range(sub_range)
workers = list(self.range_set.generate_workers())
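# The cached sub-range splits the set: full multi-range workers on either side of one partial-range worker.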
expected_range = Range(20, 29)
expected_range.missing_ranges = [SubRange(20, 21),
SubRange(26, 29)]
expected_range.sub_ranges = [sub_range]
partial_range_worker = RangeWorker(expected_range)
expected_workers = [MultiRangeWorker(0, 19, 10),
partial_range_worker, MultiRangeWorker(30, 49, 10)]
self.assertEqual(workers, expected_workers)
def test_not_aligned_ranges(self):
start = 5
stop = 25
step = 10
range_set = RangeSet(start, stop, step)
self.assertEqual(range_set.ranges, [Range(5, 9), Range(10, 19),
Range(20, 25)])
| gpl-3.0 | -5,210,675,510,729,629,000 | 28.480315 | 78 | 0.60844 | false |
Foair/course-crawler | mooc/xuetangx.py | 1 | 7412 | # -*- coding: utf-8 -*-
"""学堂在线"""
import json
from bs4 import BeautifulSoup
from .utils import *
BASE_URL = 'http://www.xuetangx.com'
CANDY = Crawler()
CONFIG = {}
FILES = {}
def get_book(url):
"""获得所有的 PDF 电子书"""
nav_page = CANDY.get(url).text
shelves = set(re.findall(r'/courses/.+/pdfbook/\d/', nav_page))
for shelf_count, shelf in enumerate(shelves, 1):
res = CANDY.get(BASE_URL + shelf).text
soup = BeautifulSoup(res, 'lxml')
WORK_DIR.change('Books', str(shelf_count))
for book_count, book in enumerate(soup.select('#booknav a'), 1):
res_print(book.string)
file_name = Resource.file_to_save(book.string) + '.pdf'
CANDY.download_bin(BASE_URL + book['rel'][0], WORK_DIR.file(file_name))
def get_handout(url):
"""从课程信息页面获得课程讲义并存为 HTML 文件"""
handouts_html = ClassicFile('Handouts.html')
res = CANDY.get(url).text
soup = BeautifulSoup(res, 'lxml')
handouts = soup.find(class_='handouts')
# Replace relative URLs with absolute ones
for link in handouts.select('a[href^="/"]'):
link['href'] = BASE_URL + link['href']
handouts_html.write_string('<!DOCTYPE html>\n<html>\n<head>\n<title>讲义</title>\n<meta charset="utf-8">\n'
'</head>\n<body>\n%s</body>\n</html>' % handouts.prettify())
def get_video(video):
"""根据视频 ID 和文件名字获取视频信息"""
file_name = video.file_name
res_print(file_name + '.mp4')
res = CANDY.get('https://xuetangx.com/videoid2source/' + video.meta).text
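# Prefer the higher-quality stream (quality20); fall back to quality10 if it is unavailable.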
try:
video_url = json.loads(res)['sources']['quality20'][0]
except:
video_url = json.loads(res)['sources']['quality10'][0]
FILES['videos'].write_string(video_url)
FILES['renamer'].write(re.search(r'(\w+-[12]0.mp4)', video_url).group(1), file_name)
def get_content(url):
"""获取网页详细内容"""
outline = Outline()
counter = Counter()
video_counter = Counter()
playlist = Playlist()
video_list = []
courseware = CANDY.get(url).text
soup = BeautifulSoup(courseware, 'lxml')
chapters = soup.find(id='accordion').find_all(class_='chapter')
for chapter in chapters:
counter.add(0)
video_counter.add(0)
chapter_title = chapter.h3.a.get_text(strip=True)
outline.write(chapter_title, counter, 0)
sections = chapter.select('ul a')
for section_info in sections:
counter.add(1)
video_counter.add(1)
section_url = BASE_URL + section_info['href']
section_title = section_info.p.string.strip()
outline.write(section_title, counter, 1)
section_page = CANDY.get(section_url).text
soup = BeautifulSoup(section_page, 'lxml')
# Some pages require the MathPlayer plugin to be installed
try:
tabs = soup.find(id='sequence-list').find_all('li')
except AttributeError:
break
for tab_count, tab_info in enumerate(tabs, 1):
counter.add(2)
# title may contain line breaks or duplicates, so use data-page-title instead
tab_title = tab_info.a.get('data-page-title')
outline.write(tab_title, counter)
if tab_title == 'Video' or tab_title == '视频' or tab_title == '':
tab_title = section_title
tab_sequence = tab_info.a.get('aria-controls')
tab_escape = soup.find(id=tab_sequence).string
tab = BeautifulSoup(tab_escape, 'lxml').div.div
blocks = tab.find_all('div', class_='xblock')
for block in blocks:
try:
# A few blocks have no data-type attribute
block_type = block['data-type']
except KeyError:
continue
if block_type == 'Video':
video_counter.add(2)
# Collapse runs of spaces or tabs into a single space
video_name = block.h2.string.strip()
outline.write(video_name, video_counter, level=3, sign='#')
if video_name == 'Video' or video_name == '视频' or video_name == '':
video_name = tab_title
video_id = block.div['data-ccsource']
video = Video(video_counter, video_name, video_id)
video_list.append(video)
if CONFIG['sub']:
get_subtitles(block.div['data-transcript-available-translations-url'],
block.div['data-transcript-translation-url'],
video.file_name)
if video_list:
WORK_DIR.change('Videos')
rename = WORK_DIR.file('Names.txt') if CONFIG['rename'] else False
if CONFIG['dpl']:
parse_res_list(video_list, rename, playlist.write, get_video)
else:
parse_res_list(video_list, rename, get_video)
def get_subtitles(available, transcript, file_name):
"""获取字幕"""
subtitle_available_url = BASE_URL + available
try:
subtitle_available = CANDY.get(subtitle_available_url).json()
except json.decoder.JSONDecodeError:
return
WORK_DIR.change('Videos')
base_subtitle_url = BASE_URL + transcript + '/'
multi_subtitle = False if len(subtitle_available) == 1 else True
for subtitle_desc in subtitle_available:
subtitle_url = base_subtitle_url + subtitle_desc
CANDY.get(subtitle_url)
if multi_subtitle:
sub_file_name = file_name + '_' + subtitle_desc.replace('_xuetangx', '') + '.srt'
else:
sub_file_name = file_name + '.srt'
subtitle = CANDY.get(subtitle_available_url.rstrip('available_translations') + 'download').content
with open(WORK_DIR.file(sub_file_name), 'wb') as subtitle_file:
subtitle_file.write(subtitle)
def get_summary(url):
"""从课程地址获得课程文件夹名称"""
about_page = CANDY.get(url).text
soup = BeautifulSoup(about_page, 'lxml')
course_name = soup.find(id='title1').string
institution = soup.find(class_='courseabout_text').a.string
dir_name = course_dir(course_name, institution)
print(dir_name)
return dir_name
def start(url, config, cookies=None):
"""调用接口函数"""
global WORK_DIR
CONFIG.update(config)
CANDY.set_cookies(cookies)
status = CANDY.get('http://www.xuetangx.com/header_ajax')
if status.json()['login']:
print('验证成功!')
else:
print('Cookie 失效。请获取新的 Cookie 并删除 xuetangx.json。')
return
course_name = get_summary(url)
WORK_DIR = WorkingDir(CONFIG['dir'], course_name)
WORK_DIR.change('Videos')
FILES['renamer'] = Renamer(WORK_DIR.file('Rename.bat'))
FILES['videos'] = ClassicFile(WORK_DIR.file('Videos.txt'))
handout = url.rstrip('about') + 'info'
courseware = url.rstrip('about') + 'courseware'
if CONFIG['doc']:
# Using the handout page as the entry point is faster
get_book(handout)
get_handout(handout)
get_content(courseware)
| mit | -4,430,089,417,675,538,400 | 32.685714 | 109 | 0.566582 | false |
googleads/google-ads-python | google/ads/googleads/v8/errors/types/keyword_plan_error.py | 1 | 1758 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.errors",
marshal="google.ads.googleads.v8",
manifest={"KeywordPlanErrorEnum",},
)
class KeywordPlanErrorEnum(proto.Message):
r"""Container for enum describing possible errors from applying a
keyword plan resource (keyword plan, keyword plan campaign,
keyword plan ad group or keyword plan keyword) or
KeywordPlanService RPC.
"""
class KeywordPlanError(proto.Enum):
r"""Enum describing possible errors from applying a keyword plan."""
UNSPECIFIED = 0
UNKNOWN = 1
BID_MULTIPLIER_OUT_OF_RANGE = 2
BID_TOO_HIGH = 3
BID_TOO_LOW = 4
BID_TOO_MANY_FRACTIONAL_DIGITS = 5
DAILY_BUDGET_TOO_LOW = 6
DAILY_BUDGET_TOO_MANY_FRACTIONAL_DIGITS = 7
INVALID_VALUE = 8
KEYWORD_PLAN_HAS_NO_KEYWORDS = 9
KEYWORD_PLAN_NOT_ENABLED = 10
KEYWORD_PLAN_NOT_FOUND = 11
MISSING_BID = 13
MISSING_FORECAST_PERIOD = 14
INVALID_FORECAST_DATE_RANGE = 15
INVALID_NAME = 16
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 8,135,299,712,212,321,000 | 32.169811 | 76 | 0.677474 | false |
TribeMedia/synapse | synapse/handlers/e2e_keys.py | 2 | 12592 | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ujson as json
import logging
from canonicaljson import encode_canonical_json
from twisted.internet import defer
from synapse.api.errors import SynapseError, CodeMessageException
from synapse.types import get_domain_from_id
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
logger = logging.getLogger(__name__)
class E2eKeysHandler(object):
def __init__(self, hs):
self.store = hs.get_datastore()
self.federation = hs.get_replication_layer()
self.device_handler = hs.get_device_handler()
self.is_mine_id = hs.is_mine_id
self.clock = hs.get_clock()
# doesn't really work as part of the generic query API, because the
# query request requires an object POST, but we abuse the
# "query handler" interface.
self.federation.register_query_handler(
"client_keys", self.on_federation_query_client_keys
)
@defer.inlineCallbacks
def query_devices(self, query_body, timeout):
""" Handle a device key query from a client
{
"device_keys": {
"<user_id>": ["<device_id>"]
}
}
->
{
"device_keys": {
"<user_id>": {
"<device_id>": {
...
}
}
}
}
"""
device_keys_query = query_body.get("device_keys", {})
# separate users by domain.
# make a map from domain to user_id to device_ids
local_query = {}
remote_queries = {}
for user_id, device_ids in device_keys_query.items():
if self.is_mine_id(user_id):
local_query[user_id] = device_ids
else:
remote_queries[user_id] = device_ids
# First get local devices.
failures = {}
results = {}
if local_query:
local_result = yield self.query_local_devices(local_query)
for user_id, keys in local_result.items():
if user_id in local_query:
results[user_id] = keys
# Now attempt to get any remote devices from our local cache.
remote_queries_not_in_cache = {}
if remote_queries:
query_list = []
for user_id, device_ids in remote_queries.iteritems():
if device_ids:
query_list.extend((user_id, device_id) for device_id in device_ids)
else:
query_list.append((user_id, None))
user_ids_not_in_cache, remote_results = (
yield self.store.get_user_devices_from_cache(
query_list
)
)
for user_id, devices in remote_results.iteritems():
user_devices = results.setdefault(user_id, {})
for device_id, device in devices.iteritems():
keys = device.get("keys", None)
device_display_name = device.get("device_display_name", None)
if keys:
result = dict(keys)
unsigned = result.setdefault("unsigned", {})
if device_display_name:
unsigned["device_display_name"] = device_display_name
user_devices[device_id] = result
for user_id in user_ids_not_in_cache:
domain = get_domain_from_id(user_id)
r = remote_queries_not_in_cache.setdefault(domain, {})
r[user_id] = remote_queries[user_id]
# Now fetch any devices that we don't have in our cache
@defer.inlineCallbacks
def do_remote_query(destination):
destination_query = remote_queries_not_in_cache[destination]
try:
limiter = yield get_retry_limiter(
destination, self.clock, self.store
)
with limiter:
remote_result = yield self.federation.query_client_keys(
destination,
{"device_keys": destination_query},
timeout=timeout
)
for user_id, keys in remote_result["device_keys"].items():
if user_id in destination_query:
results[user_id] = keys
except CodeMessageException as e:
failures[destination] = {
"status": e.code, "message": e.message
}
except NotRetryingDestination as e:
failures[destination] = {
"status": 503, "message": "Not ready for retry",
}
except Exception as e:
# include ConnectionRefused and other errors
failures[destination] = {
"status": 503, "message": e.message
}
yield preserve_context_over_deferred(defer.gatherResults([
preserve_fn(do_remote_query)(destination)
for destination in remote_queries_not_in_cache
]))
defer.returnValue({
"device_keys": results, "failures": failures,
})
@defer.inlineCallbacks
def query_local_devices(self, query):
"""Get E2E device keys for local users
Args:
query (dict[string, list[string]|None]): map from user_id to a list
of devices to query (None for all devices)
Returns:
defer.Deferred: (resolves to dict[string, dict[string, dict]]):
map from user_id -> device_id -> device details
"""
local_query = []
result_dict = {}
for user_id, device_ids in query.items():
if not self.is_mine_id(user_id):
logger.warning("Request for keys for non-local user %s",
user_id)
raise SynapseError(400, "Not a user here")
if not device_ids:
local_query.append((user_id, None))
else:
for device_id in device_ids:
local_query.append((user_id, device_id))
# make sure that each queried user appears in the result dict
result_dict[user_id] = {}
results = yield self.store.get_e2e_device_keys(local_query)
# Build the result structure, un-jsonify the results, and add the
# "unsigned" section
for user_id, device_keys in results.items():
for device_id, device_info in device_keys.items():
r = dict(device_info["keys"])
r["unsigned"] = {}
display_name = device_info["device_display_name"]
if display_name is not None:
r["unsigned"]["device_display_name"] = display_name
result_dict[user_id][device_id] = r
defer.returnValue(result_dict)
@defer.inlineCallbacks
def on_federation_query_client_keys(self, query_body):
""" Handle a device key query from a federated server
"""
device_keys_query = query_body.get("device_keys", {})
res = yield self.query_local_devices(device_keys_query)
defer.returnValue({"device_keys": res})
@defer.inlineCallbacks
def claim_one_time_keys(self, query, timeout):
local_query = []
remote_queries = {}
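# Split the claim between devices on this homeserver and those that must be
# requested from remote servers.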
for user_id, device_keys in query.get("one_time_keys", {}).items():
if self.is_mine_id(user_id):
for device_id, algorithm in device_keys.items():
local_query.append((user_id, device_id, algorithm))
else:
domain = get_domain_from_id(user_id)
remote_queries.setdefault(domain, {})[user_id] = device_keys
results = yield self.store.claim_e2e_one_time_keys(local_query)
json_result = {}
failures = {}
for user_id, device_keys in results.items():
for device_id, keys in device_keys.items():
for key_id, json_bytes in keys.items():
json_result.setdefault(user_id, {})[device_id] = {
key_id: json.loads(json_bytes)
}
@defer.inlineCallbacks
def claim_client_keys(destination):
device_keys = remote_queries[destination]
try:
limiter = yield get_retry_limiter(
destination, self.clock, self.store
)
with limiter:
remote_result = yield self.federation.claim_client_keys(
destination,
{"one_time_keys": device_keys},
timeout=timeout
)
for user_id, keys in remote_result["one_time_keys"].items():
if user_id in device_keys:
json_result[user_id] = keys
except CodeMessageException as e:
failures[destination] = {
"status": e.code, "message": e.message
}
except NotRetryingDestination as e:
failures[destination] = {
"status": 503, "message": "Not ready for retry",
}
except Exception as e:
# include ConnectionRefused and other errors
failures[destination] = {
"status": 503, "message": e.message
}
yield preserve_context_over_deferred(defer.gatherResults([
preserve_fn(claim_client_keys)(destination)
for destination in remote_queries
]))
defer.returnValue({
"one_time_keys": json_result,
"failures": failures
})
@defer.inlineCallbacks
def upload_keys_for_user(self, user_id, device_id, keys):
time_now = self.clock.time_msec()
# TODO: Validate the JSON to make sure it has the right keys.
device_keys = keys.get("device_keys", None)
if device_keys:
logger.info(
"Updating device_keys for device %r for user %s at %d",
device_id, user_id, time_now
)
# TODO: Sign the JSON with the server key
changed = yield self.store.set_e2e_device_keys(
user_id, device_id, time_now, device_keys,
)
if changed:
# Only notify about device updates *if* the keys actually changed
yield self.device_handler.notify_device_update(user_id, [device_id])
one_time_keys = keys.get("one_time_keys", None)
if one_time_keys:
logger.info(
"Adding %d one_time_keys for device %r for user %r at %d",
len(one_time_keys), device_id, user_id, time_now
)
key_list = []
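# One-time key IDs arrive as "<algorithm>:<key_id>"; store the parts separately
# alongside the canonical JSON encoding of the key.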
for key_id, key_json in one_time_keys.items():
algorithm, key_id = key_id.split(":")
key_list.append((
algorithm, key_id, encode_canonical_json(key_json)
))
yield self.store.add_e2e_one_time_keys(
user_id, device_id, time_now, key_list
)
# the device should have been registered already, but it may have been
# deleted due to a race with a DELETE request. Or we may be using an
# old access_token without an associated device_id. Either way, we
# need to double-check the device is registered to avoid ending up with
# keys without a corresponding device.
self.device_handler.check_device_registered(user_id, device_id)
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
defer.returnValue({"one_time_key_counts": result})
| apache-2.0 | 1,883,954,235,118,884,400 | 37.98452 | 87 | 0.542011 | false |
hoomanlogic/hoomancmd | hoomancmd/matchsuggestion.py | 1 | 17803 | ### score matched, proximity, missing, or nomatch to find the best fit command ###
# todo: Improve suggestion engine
# >> plns
# Did you mean 'logs'? : 97 : journal:90
# used by all versions
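# Maps each character to the keys physically adjacent to it on a QWERTY keyboard,
# so near-miss keystrokes can be scored as 'proximity' rather than outright mismatches.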
proximity_mapping = {
'q': ['a', 's', 'w', '2', '1', '`'],
'w': ['q', 'a', 's', 'd', 'e', '3', '2', '1'],
'e': ['w', 's', 'd', 'f', 'r', '4', '3', '2'],
'r': ['e', 'd', 'f', 'g', 't', '5', '4', '3'],
't': ['r', 'f', 'g', 'h', 'y', '6', '5', '4'],
'y': ['t', 'g', 'h', 'j', 'u', '7', '6', '5'],
'u': ['y', 'h', 'j', 'k', 'i', '8', '7', '6'],
'i': ['u', 'j', 'k', 'l', 'o', '9', '8', '7'],
'o': ['i', 'k', 'l', ';', 'p', '0', '9', '8'],
'p': ['o', 'l', ';', '\'', '[', '-', '0', '9'],
'[': ['p', ';', '\'', ']', '=', '-', '0'],
']': ['[', '\'', '\\', '='],
'a': ['z', 'x', 's', 'w', 'q'],
's': ['a', 'z', 'x', 'c', 'd', 'e', 'w', 'q'],
'd': ['s', 'x', 'c', 'v', 'f', 'r', 'e', 'w'],
'f': ['d', 'c', 'v', 'b', 'g', 't', 'r', 'e'],
'g': ['f', 'v', 'b', 'n', 'h', 'y', 't', 'r'],
'h': ['g', 'b', 'n', 'm', 'j', 'u', 'y', 't'],
'j': ['h', 'n', 'm', ',', 'k', 'i', 'u', 'y'],
'k': ['j', 'm', ',', '.', 'l', 'o', 'i', 'u'],
'l': ['k', ',', '.', '/', ';', 'p', 'o', 'i'],
';': ['l', '.', '/', '\'', '[', 'p'],
'\'': [';', '/', ']', '[', 'p'],
'z': [ 'x', 's', 'a'],
'x': ['z', 'c', 'd', 's', 'a'],
'c': ['x', 'v', 'f', 'd', 's'],
'v': ['c', 'b', 'g', 'f', 'd'],
'b': ['v', 'n', 'h', 'g', 'f'],
'n': ['b', 'm', 'j', 'h', 'g'],
'm': ['n', ',', 'k', 'j', 'h'],
'1': ['q', 'w', '2', '`'],
'2': ['1', 'q', 'w', 'e', '3'],
'3': ['2', 'w', 'e', 'r', '4'],
'4': ['3', 'e', 'r', 't', '5'],
'5': ['4', 'r', 't', 'y', '6'],
'6': ['5', 't', 'y', 'u', '7'],
'7': ['6', 'y', 'u', 'i', '8'],
'8': ['7', 'u', 'i', 'o', '9'],
'9': ['8', 'i', 'o', 'p', '0'],
'0': ['9', 'o', 'p', '[', '-'],
'-': ['0', 'p', '[', ']', '='],
'+': ['-', '[', ']', '\\']
}
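# The mapping above lists, for each key, its physical neighbours on a US QWERTY
# layout; is_in_proximity() below treats a neighbouring key as a near-miss
# rather than a full mismatch.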
# version 1 variables
max_extra = 1 # input has extra characters
max_missing = -1  # input has fewer characters
class MatchStats(object):
def __init__(self, item, disparity):
self.match = 0
self.proximity = 0
self.disparity = disparity
self.item = item
self.too_disparate = False
self.missing = 0
def increment_match(self):
self.match += 1
    def increment_proximity(self):
        self.proximity += 1
def increment_missing(self):
self.missing += 1
def compare(self, other_instance):
if other_instance is None:
return self
if self.proximity > other_instance.proximity:
return other_instance
elif self.proximity < other_instance.proximity:
return self
else:
if self.match > other_instance.match:
return self
elif self.match < other_instance.match:
return other_instance
else:
if self.disparity > other_instance.disparity:
return other_instance
else:
return self
class BetterMatchStats(object):
# version 2 & 3 variables
max_sequential_disparity = 2
def __init__(self, matchterm):
self.match = 0
self.proximity = 0
self.disparity = 0
self.sequential_disparity = 0
self.matchterm = matchterm
self.too_disparate = False
self.runner_up_score = 0
self.runner_up_matchterm = ''
def increment_match(self):
self.match += 1
self._reset_sequential_disparity()
def increment_proximity(self):
self.proximity += 1
self._reset_sequential_disparity()
def increment_disparity(self):
self.disparity += 1
self._increment_sequential_disparity()
if self.disparity > len(self.matchterm):
self.too_disparate = True
def _increment_sequential_disparity(self):
self.sequential_disparity += 1
if self.sequential_disparity > BetterMatchStats.max_sequential_disparity:
self.too_disparate = True
def _reset_sequential_disparity(self):
self.sequential_disparity = 0
def get_score(self):
if self.disparity == 0 and self.proximity == 0:
return 100
else:
return 100 - ((self.disparity * 2) + self.proximity)
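    # Worked example (illustrative): with one disparity and one proximity the
    # score is 100 - (1 * 2 + 1) = 97, the kind of value shown in the
    # "Did you mean 'logs'? : 97" sample at the top of this module.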
def compare(self, other_instance):
if other_instance is None or other_instance.too_disparate:
return self
if self.too_disparate:
other_instance.runner_up_score = self.get_score()
other_instance.runner_up_matchterm = self.matchterm
return other_instance
if self.disparity > other_instance.disparity:
other_instance.runner_up_score = self.get_score()
other_instance.runner_up_matchterm = self.matchterm
return other_instance
elif self.disparity < other_instance.disparity:
return self
if self.match > other_instance.match:
return self
elif self.match < other_instance.match:
other_instance.runner_up_score = self.get_score()
other_instance.runner_up_matchterm = self.matchterm
return other_instance
if self.proximity > other_instance.proximity:
other_instance.runner_up_score = self.get_score()
other_instance.runner_up_matchterm = self.matchterm
return other_instance
else:
return self
def copy_attributes(self, other_instance):
self.match = other_instance.match
self.proximity = other_instance.proximity
self.disparity = other_instance.disparity
self.sequential_disparity = other_instance.sequential_disparity
self.too_disparate = other_instance.too_disparate
@classmethod
def copy(cls, obj):
instance = BetterMatchStats(obj.matchterm)
instance.match = obj.match
instance.proximity = obj.proximity
instance.disparity = obj.disparity
instance.sequential_disparity = obj.sequential_disparity
instance.too_disparate = obj.too_disparate
return instance
def is_in_proximity(char1, char2):
if char2 in proximity_mapping[char1]:
return True
else:
return False
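# Example (illustrative): is_in_proximity('a', 's') is True because 's' is a
# QWERTY neighbour of 'a', while is_in_proximity('a', 'p') is False.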
# version 1
def getbestmatch_v1(input_, list_):
input_ = input_.lower()
matchstats_best = None
for item in list_:
item = item.lower()
disparity = len(input_) - len(item)
# ensure disparity isn't too great
if disparity < max_missing or disparity > max_extra:
continue
inner = input_
outer = item
if disparity < 0:
inner = input_
outer = item
elif disparity > 0:
inner = item
outer = input_
# now we put the smaller as the inner to move around
# so we use the absolute val of disparity to
# put the smaller through the scenarios
for i in range(0, abs(disparity) + 1):
outer_subset = outer[i:]
matchstats = MatchStats(item, abs(disparity))
# loop through characters and compare them
for j, inner_char in enumerate(inner):
if inner_char == outer_subset[j]:
matchstats.increment_match()
continue
elif is_in_proximity(inner_char, outer_subset[j]):
matchstats.increment_proximity()
continue
else:
matchstats.too_disparate = True
break
if not matchstats.too_disparate:
matchstats_best = matchstats.compare(matchstats_best)
if matchstats_best is None:
return None
else:
return matchstats_best.item
# version 2
def getbestmatch_v2(input_, list_):
    # case insensitive matching
input_ = input_.lower()
# stores best match so far
current_matchstats_best = None
# iterate through all the possible matchterms
# to find the best match
for matchterm in list_:
        # case insensitive matching
matchterm = matchterm.lower()
# ensure disparity isn't too great from the get go
# by comparing overall length, if it is too disparate
# then move on to the next matchterm
# if abs(len(input_) - len(matchterm)) > max_sequential_disparity:
# continue
# create object to hold the match stats
matchstats = BetterMatchStats(matchterm)
# run the input_ and matchterm through
        # scenarios to find a potential match
matchup_v2(input_, matchterm, matchstats)
        # the while loop in matchup_v2 ended because one of the strings ran out
# now let's calculate the leftover disparity
max_char_len = 0
if len(input_) > len(matchterm):
max_char_len = len(input_)
else:
max_char_len = len(matchterm)
for i in (range(0, abs(max_char_len - (matchstats.match + matchstats.proximity + matchstats.disparity)))):
matchstats.increment_disparity()
# compare the matchstats after matchup with the current best matchstats
# and set the better of the two to the best match so far
# -- may the best match win...
current_matchstats_best = matchstats.compare(current_matchstats_best)
return current_matchstats_best.matchterm
def matchup_v2(input_, matchterm, matchstats, depth=0):
input_index = 0
matchterm_index = 0
while matchterm_index < len(matchterm) and input_index < len(input_):
if input_[input_index] == matchterm[matchterm_index]:
matchstats.increment_match()
input_index = input_index + 1
matchterm_index = matchterm_index + 1
continue
elif is_in_proximity(input_[input_index], matchterm[matchterm_index]):
matchstats.increment_proximity()
input_index = input_index + 1
matchterm_index = matchterm_index + 1
else:
# increment disparity and check if we are too disparate
matchstats.increment_disparity()
if matchstats.too_disparate:
return
# here we need to branch and try both the possibility that input_ has
# missing or extra chars, then compare the two branches to pick the
# best matchup
# input_ may have bad chars, similar to the proximity solution,
# but treats it as a disparity
bad_char_scenario = None
if input_index + 1 <= len(input_) and matchterm_index + 1 <= len(matchterm):
bad_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v2(input_[input_index + 1:], matchterm[matchterm_index + 1:], bad_char_scenario, depth=depth+1)
# input_ may have missing chars
missing_char_scenario = None
if matchterm_index + 1 <= len(matchterm):
missing_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v2(input_[input_index:], matchterm[matchterm_index + 1:], missing_char_scenario, depth=depth+1)
# input_ may have extra chars
extra_char_scenario = None
if input_index + 1 <= len(input_):
extra_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v2(input_[input_index + 1:], matchterm[matchterm_index:], extra_char_scenario, depth=depth+1)
# if both the input_ and matchterm have reached the end of their input_
# then return
if input_index + 1 >= len(input_) and matchterm_index + 1 >= len(matchterm):
return
# grab either one that is not None and compare to the other
# one, which may be None, but one of these scenarios is
# guaranteed to not be None by this point
best_scenario = None
if missing_char_scenario is not None:
best_scenario = missing_char_scenario.compare(extra_char_scenario)
else:
best_scenario = extra_char_scenario.compare(missing_char_scenario)
# compare the winner of missing vs extra with the bad chars scenario
best_scenario = best_scenario.compare(bad_char_scenario)
# copy the attributes from the best scenario
# because simply setting the object makes the
# root caller lose the changes
matchstats.copy_attributes(best_scenario)
return
# investigate this
# >> veweerython
# Did you mean "deleteprop"?
# version 3
def getbestmatch_v3(input_, list_, set_max_sequential_disparity=None):
    # case insensitive matching
input_ = input_.lower()
# stores best match so far
current_matchstats_best = None
if set_max_sequential_disparity is not None:
BetterMatchStats.max_sequential_disparity = set_max_sequential_disparity
# iterate through all the possible matchterms
# to find the best match
for matchterm in list_:
        # case insensitive matching
matchterm = matchterm.lower()
# ensure disparity isn't too great from the get go
# by comparing overall length, if it is too disparate
# then move on to the next matchterm
# if abs(len(input_) - len(matchterm)) > max_sequential_disparity:
# continue
# create object to hold the match stats
matchstats = BetterMatchStats(matchterm)
if len(input_) > len(matchterm):
max_char_len = len(input_)
inner = matchterm
outer = input_
else:
max_char_len = len(matchterm)
inner = input_
outer = matchterm
# run the input_ and matchterm through
        # scenarios to find a potential match
matchup_v3(inner, outer, matchstats)
for i in (range(0, abs(max_char_len - (matchstats.match + matchstats.proximity + matchstats.disparity)))):
matchstats.disparity = matchstats.disparity + 1
# compare the matchstats after matchup with the current best matchstats
# and set the better of the two to the best match so far
# -- may the best match win...
current_matchstats_best = matchstats.compare(current_matchstats_best)
# >> testmatch hooman human humous humid
# humid 90 0
return current_matchstats_best
def matchup_v3(input_, matchterm, matchstats, depth=0):
input_index = 0
matchterm_index = 0
while matchterm_index < len(matchterm) and input_index < len(input_):
if input_[input_index] == matchterm[matchterm_index]:
matchstats.increment_match()
input_index = input_index + 1
matchterm_index = matchterm_index + 1
continue
elif is_in_proximity(input_[input_index], matchterm[matchterm_index]):
matchstats.increment_proximity()
input_index = input_index + 1
matchterm_index = matchterm_index + 1
else:
# increment disparity and check if we are too disparate
matchstats.increment_disparity()
if matchstats.too_disparate:
return
# here we need to branch and try both the possibility that input_ has
# missing or extra chars, then compare the two branches to pick the
# best matchup
# input_ may have bad chars, similar to the proximity solution,
# but treats it as a disparity
bad_char_scenario = None
if input_index + 1 <= len(input_) and matchterm_index + 1 <= len(matchterm):
bad_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v3(input_[input_index + 1:], matchterm[matchterm_index + 1:], bad_char_scenario, depth=depth+1)
# input_ may have missing chars
missing_char_scenario = None
if matchterm_index + 1 <= len(matchterm):
missing_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v3(input_[input_index:], matchterm[matchterm_index + 1:], missing_char_scenario, depth=depth+1)
# input_ may have extra chars
extra_char_scenario = None
if input_index + 1 <= len(input_):
extra_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v3(input_[input_index + 1:], matchterm[matchterm_index:], extra_char_scenario, depth=depth+1)
# if both the input_ and matchterm have reached the end of their input_
# then return
if input_index + 1 >= len(input_) and matchterm_index + 1 >= len(matchterm):
return
# grab either one that is not None and compare to the other
# one, which may be None, but one of these scenarios is
# guaranteed to not be None by this point
best_scenario = None
if missing_char_scenario is not None:
best_scenario = missing_char_scenario.compare(extra_char_scenario)
else:
best_scenario = extra_char_scenario.compare(missing_char_scenario)
# compare the winner of missing vs extra with the bad chars scenario
best_scenario = best_scenario.compare(bad_char_scenario)
# copy the attributes from the best scenario
# because simply setting the object makes the
# root caller lose the changes
matchstats.copy_attributes(best_scenario)
return
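# Minimal usage sketch (assumed calling code; getbestmatch_v3 returns a
# BetterMatchStats object for the closest candidate term):
#   stats = getbestmatch_v3('plns', ['logs', 'journal', 'notes'])
#   print stats.matchterm, stats.get_score(), stats.runner_up_matchterm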
| apache-2.0 | -2,147,103,867,725,780,500 | 35.935685 | 119 | 0.559569 | false |
pernici/sympy | sympy/series/tests/test_order.py | 1 | 6982 | from sympy import Symbol, Rational, Order, C, exp, ln, log, O, var, nan, pi, S
from sympy.utilities.pytest import XFAIL, raises
from sympy.abc import w, x, y, z
def test_caching_bug():
#needs to be a first test, so that all caches are clean
#cache it
e = O(w)
#and test that this won't raise an exception
f = O(w**(-1/x/log(3)*log(5)), w)
def test_simple_1():
o = Rational(0)
assert Order(2*x) == Order(x)
assert Order(x)*3 == Order(x)
assert -28*Order(x) == Order(x)
assert Order(-23) == Order(1)
assert Order(exp(x)) == Order(1,x)
assert Order(exp(1/x)).expr == exp(1/x)
assert Order(x*exp(1/x)).expr == x*exp(1/x)
assert Order(x**(o/3)).expr == x**(o/3)
assert Order(x**(5*o/3)).expr == x**(5*o/3)
assert Order(x**2 + x + y, x) == \
Order(x**2 + x + y, y) == O(1)
raises(NotImplementedError, 'Order(x, 2 - x)')
def test_simple_2():
assert Order(2*x)*x == Order(x**2)
assert Order(2*x)/x == Order(1,x)
assert Order(2*x)*x*exp(1/x) == Order(x**2*exp(1/x))
assert (Order(2*x)*x*exp(1/x)/ln(x)**3).expr == x**2*exp(1/x)*ln(x)**-3
def test_simple_3():
assert Order(x)+x == Order(x)
assert Order(x)+2 == 2+Order(x)
assert Order(x)+x**2 == Order(x)
assert Order(x)+1/x == 1/x+Order(x)
assert Order(1/x)+1/x**2 == 1/x**2+Order(1/x)
assert Order(x)+exp(1/x) == Order(x)+exp(1/x)
def test_simple_4():
assert Order(x)**2 == Order(x**2)
assert Order(x**3)**-2 == Order(x**-6)
def test_simple_5():
assert Order(x)+Order(x**2) == Order(x)
assert Order(x)+Order(x**-2) == Order(x**-2)
assert Order(x)+Order(1/x) == Order(1/x)
def test_simple_6():
assert Order(x)-Order(x) == Order(x)
assert Order(x)+Order(1) == Order(1)
assert Order(x)+Order(x**2) == Order(x)
assert Order(1/x)+Order(1) == Order(1/x)
assert Order(x)+Order(exp(1/x)) == Order(exp(1/x))
assert Order(x**3)+Order(exp(2/x)) == Order(exp(2/x))
assert Order(x**-3)+Order(exp(2/x)) == Order(exp(2/x))
def test_simple_7():
assert 1+O(1) == O(1)
assert 2+O(1) == O(1)
assert x+O(1) == O(1)
assert 1/x+O(1) == 1/x+O(1)
def test_contains_0():
assert Order(1,x).contains(Order(1,x))
assert Order(1,x).contains(Order(1))
assert Order(1).contains(Order(1,x))
def test_contains_1():
assert Order(x).contains(Order(x))
assert Order(x).contains(Order(x**2))
assert not Order(x**2).contains(Order(x))
assert not Order(x).contains(Order(1/x))
assert not Order(1/x).contains(Order(exp(1/x)))
assert not Order(x).contains(Order(exp(1/x)))
assert Order(1/x).contains(Order(x))
assert Order(exp(1/x)).contains(Order(x))
assert Order(exp(1/x)).contains(Order(1/x))
assert Order(exp(1/x)).contains(Order(exp(1/x)))
assert Order(exp(2/x)).contains(Order(exp(1/x)))
assert not Order(exp(1/x)).contains(Order(exp(2/x)))
def test_contains_2():
assert Order(x).contains(Order(y)) is None
assert Order(x).contains(Order(y*x))
assert Order(y*x).contains(Order(x))
assert Order(y).contains(Order(x*y))
assert Order(x).contains(Order(y**2*x))
def test_contains_3():
assert Order(x*y**2).contains(Order(x**2*y)) is None
assert Order(x**2*y).contains(Order(x*y**2)) is None
def test_add_1():
assert Order(x+x) == Order(x)
assert Order(3*x-2*x**2) == Order(x)
assert Order(1+x) == Order(1,x)
assert Order(1+1/x) == Order(1/x)
assert Order(ln(x)+1/ln(x)) == Order(ln(x))
assert Order(exp(1/x)+x) == Order(exp(1/x))
assert Order(exp(1/x)+1/x**20) == Order(exp(1/x))
def test_ln_args():
assert O(log(x)) + O(log(2*x)) == O(log(x))
assert O(log(x)) + O(log(x**3)) == O(log(x))
assert O(log(x*y)) + O(log(x)+log(y)) == O(log(x*y))
def test_multivar_0():
assert Order(x*y).expr == x*y
assert Order(x*y**2).expr == x*y**2
assert Order(x*y,x).expr == x
assert Order(x*y**2,y).expr == y**2
assert Order(x*y*z).expr == x*y*z
assert Order(x/y).expr == x/y
assert Order(x*exp(1/y)).expr == x*exp(1/y)
assert Order(exp(x)*exp(1/y)).expr == exp(1/y)
def test_multivar_0a():
assert Order(exp(1/x)*exp(1/y)).expr == exp(1/x + 1/y)
def test_multivar_1():
assert Order(x+y).expr == x+y
assert Order(x+2*y).expr == x+y
assert (Order(x+y)+x).expr == (x+y)
assert (Order(x+y)+x**2) == Order(x+y)
assert (Order(x+y)+1/x) == 1/x+Order(x+y)
assert Order(x**2+y*x).expr == x**2+y*x
def test_multivar_2():
assert Order(x**2*y+y**2*x,x,y).expr == x**2*y+y**2*x
def test_multivar_mul_1():
assert Order(x+y)*x == Order(x**2+y*x,x,y)
def test_multivar_3():
assert (Order(x)+Order(y)).args in [
(Order(x), Order(y)),
(Order(y), Order(x))]
assert Order(x)+Order(y)+Order(x+y) == Order(x+y)
assert (Order(x**2*y)+Order(y**2*x)).args in [
(Order(x*y**2), Order(y*x**2)),
(Order(y*x**2), Order(x*y**2))]
assert (Order(x**2*y)+Order(y*x)) == Order(x*y)
def test_issue369():
x = Symbol('x')
y = Symbol('y', negative=True)
z = Symbol('z', complex=True)
# check that Order does not modify assumptions about symbols
Order(x)
Order(y)
Order(z)
assert x.is_positive == None
assert y.is_positive == False
assert z.is_positive == None
assert x.is_infinitesimal == None
assert y.is_infinitesimal == None
assert z.is_infinitesimal == None
def test_leading_order():
assert (x+1+1/x**5).extract_leading_order(x) == ((1/x**5, O(1/x**5)),)
assert (1+1/x).extract_leading_order(x) == ((1/x, O(1/x)),)
assert (1+x).extract_leading_order(x) == ((1, O(1, x)),)
assert (1+x**2).extract_leading_order(x) == ((1, O(1, x)),)
assert (2+x**2).extract_leading_order(x) == ((2, O(1, x)),)
assert (x+x**2).extract_leading_order(x) == ((x, O(x)),)
def test_leading_order2():
assert set((2+pi+x**2).extract_leading_order(x)) == set(((pi, O(1, x)),
(S(2), O(1, x))))
assert set((2*x+pi*x+x**2).extract_leading_order(x)) == set(((2*x, O(x)),
(x*pi, O(x))))
def test_order_leadterm():
assert O(x**2)._eval_as_leading_term(x) == O(x**2)
def test_nan():
assert not O(x).contains(nan)
def test_O1():
assert O(1) == O(1, x)
assert O(1) == O(1, y)
assert hash(O(1)) == hash(O(1, x))
assert hash(O(1)) == hash(O(1, y))
def test_getn():
# other lines are tested incidentally by the suite
assert O(x).getn() == 1
assert O(x/log(x)).getn() == 1
assert O(x**2/log(x)**2).getn() == 2
assert O(x*log(x)).getn() == 1
raises(NotImplementedError, '(O(x) + O(y)).getn()')
def test_diff():
assert O(x**2).diff(x) == O(x)
def test_getO():
assert (x).getO() is None
assert (x).removeO() == x
assert (O(x)).getO() == O(x)
assert (O(x)).removeO() == 0
assert (z + O(x) + O(y)).getO() == O(x) + O(y)
assert (z + O(x) + O(y)).removeO() == z
raises(NotImplementedError, '(O(x)+O(y)).getn()')
| bsd-3-clause | -6,458,170,842,665,350,000 | 32.567308 | 78 | 0.563879 | false |
nadgowdas/cargo | cli/cargo.py | 1 | 2456 | #!/usr/bin/env python
#Copyright IBM Corporation 2015.
#LICENSE: Apache License 2.0 http://opensource.org/licenses/Apache-2.0
import os
import sys
import optparse
import logging
from voyage import *
def main():
    usage = "usage: python %prog {--list | --migrate --source <source> --container <container> --target <target> [--rootfs] | --failover --container <container> --target <target> | --status --container <container>}"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-l", "--list", action="store_true", dest="listc", default=False, help="list containers")
parser.add_option("-m", "--migrate", action="store_true", dest="migrate", default=False, help="migrate container")
parser.add_option("-f", "--failover", action="store_true", dest="failover", default=False, help="failover container")
parser.add_option("--status", action="store_true", dest="status", default=False, help="query lazy replication status")
parser.add_option("--source", action="store", dest="source", default = None, help="Source Host (agent name)")
parser.add_option("--container", action="store", dest="container", default = None, help="Container name to be migrated")
parser.add_option("--target", action="store", dest="target", default = None, help="Target Host (agent name)")
parser.add_option("--rootfs", action="store_true", dest="rootfs", default=False, help="migrate rootfs")
parser.add_option("-s", "--server", action="store", dest="server", default="127.0.0.1:5000", help="Cargo server and port")
opts,args= parser.parse_args()
listc = opts.listc
migrate = opts.migrate
failover = opts.failover
server = opts.server
source = opts.source
target = opts.target
rootfs = opts.rootfs
container = opts.container
    status = opts.status
if not listc and not migrate and not failover and not status:
parser.print_help()
if migrate and not source and not target and not container:
parser.print_help()
if failover and not target and not container and not server:
parser.print_help()
if status and not container:
parser.print_help()
voyage = Voyage(server)
if listc:
voyage.listcontainers()
sys.exit(0)
if migrate:
voyage.migrate(source, container, target, rootfs)
sys.exit(0)
if failover:
voyage.failover(container, target)
sys.exit(0)
if status:
voyage.getStatus(container)
if __name__=="__main__":
main()
| apache-2.0 | 8,801,125,674,314,020,000 | 35.117647 | 150 | 0.661645 | false |
Ilphrin/TuxleTriad | Menu.py | 1 | 16142 | # coding: utf-8
import pygame
import os
import sys
import gettext
from functions import *
from color import *
from pygame.locals import *
from game import Application
from Sound import Sound
from Text import Text
from Buttons import Button
from listOfCards import *
from Card import Card
pygame.init()
class Menu(pygame.sprite.Sprite):
def __init__(self, width, height):
self.FONT = "Playball.ttf"
# We create the window
self.width = width
self.height = height
fullscreen = pygame.NOFRAME
self.dimension = (self.width, self.height)
self.screen = pygame.display.set_mode(self.dimension, fullscreen)
pygame.display.set_caption("TuxleTriad")
self._load_translation()
self.bkgrnd, self.bkgrndRect = loadImage("background.jpg")
self.bkgrndRect = self.bkgrnd.get_rect()
# The Clock of the game, to manage the frame-rate
self.clock = pygame.time.Clock()
self.fps = 30
# We start the Sound object, playing music and sounds.
self.sound = Sound()
# Needed to keep track of the game if we do a pause during the game.
self.app = None
self.main()
def main(self):
elemText = [_("Play"), _("Options"), _("Rules"), _("About"),
_("Quit Game")]
self.menu = []
for elem in elemText:
self.menu.append(Text(elem, self.FONT, white, 40))
posx = 400
posy = 400 - (60 * len(elemText))
for elem in self.menu:
elem.rect.center = ((posx, posy))
posy += 100
pygame.event.clear()
self.updateMenu()
while 1:
pygame.display.flip()
deactivate()
event = pygame.event.wait()
if event.type == MOUSEBUTTONUP:
self.clicked()
elif event.type == QUIT:
self.quitGame()
self.clock.tick(self.fps)
def updateMenu(self):
self.screen.blit(self.bkgrnd, self.bkgrndRect)
for i in range(len(self.menu)):
self.screen.blit(self.menu[i].surface, self.menu[i].rect)
self.clock.tick(self.fps)
def quitGame(self):
setConfig(self.sound.volume)
pygame.quit()
sys.exit()
def oldMenu(self):
while(1):
for button in self.menu:
button.rect.centerx -= 100 - self.fps
if (button.rect.centerx <= - 500):
return;
self.updateMenu()
pygame.display.flip()
def clicked(self):
for button in self.menu:
if button.rect.collidepoint(pygame.mouse.get_pos()):
self.sound.clicMenu.play()
if button.text == _(u"Quit Game"):
self.quitGame()
self.oldMenu()
if button.text == _(u"Play"):
self.play()
elif button.text == _(u"Options"):
self.options()
elif button.text == _(u"Rules"):
self.rules()
elif button.text == _(u"About"):
self.about()
self.main()
def play(self):
"""User clicked on "Play" """
if self.app != None:
texts = [_("Continue"),_("Adventure"), _("Solo"),
_("Hot Seat"), _("Back")]
else:
texts = [_("Adventure"), _("Solo"), _("Hot Seat"), _("Back")]
length = len(texts)
if self.app != None:
textPos = [(250, 100), (250,200), (250, 300), (250,400),
(550, 500)]
else:
textPos = [(250, 100), (250,200), (250, 300), (550, 500)]
self.menu = []
for i in range(length):
self.menu.append(Text(texts[i], self.FONT, white, 45))
self.menu[i].rect.topleft = textPos[i]
self.updateMenu()
pygame.display.flip()
self.clock.tick(self.fps)
while 1:
event = pygame.event.wait()
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONUP:
coordinates = pygame.mouse.get_pos()
for i in range(length):
if self.menu[i].rect.collidepoint(coordinates):
self.sound.clicMenu.play()
self.oldMenu()
if self.menu[i].text == _("Adventure"):
return
elif self.menu[i].text == _("Solo"):
return
elif self.menu[i].text == _("Hot Seat"):
self.hotSeat()
elif self.menu[i].text == _("Back"):
return
elif self.menu[i].text == _("Continue"):
self.app.main()
def options(self):
texts = [_("Audio"), _("Sounds"), _("Music"), _("Back")]
length = len(texts)
textsPos = [(320, 100), (100, 200), (100, 300), (550, 500)]
self.menu = []
for i in range(length):
self.menu.append(Text(texts[i], self.FONT, white, 50))
self.menu[i].rect.topleft = textsPos[i]
bar1, bar1Rect = loadImage("barSound.jpg")
bar2, bar2Rect = loadImage("barSound.jpg")
bar1Rect.topleft = (300, 220)
bar2Rect.topleft = (300, 320)
bars = [bar1Rect, bar2Rect]
# X coordinates, relative to the bar's, of beginning and ending
# of each volume cursor.
MIN_VOLUME = 15
MAX_VOLUME = 240
# X absolute coordinates of the volume cursor.
MIN = bars[0].x + MIN_VOLUME
MAX = bars[0].x + MAX_VOLUME
cursor1, cursor1Rect = loadImage("cursorSound.png")
cursor2, cursor2Rect = loadImage("cursorSound.png")
cursor1Rect.topleft = \
(bar1Rect.x + 225 * self.sound.soundVolume, bar1Rect.y - 23)
cursor2Rect.topleft = \
(bar2Rect.x + 225 * self.sound.musicVolume, bar2Rect.y - 23)
cursors = [cursor1Rect, cursor2Rect]
self.screen.blit(self.bkgrnd, self.bkgrndRect)
self.screen.blit(bar1, bar1Rect)
self.screen.blit(bar2, bar2Rect)
self.screen.blit(cursor1, cursors[0])
self.screen.blit(cursor2, cursors[1])
for i in range(length):
self.screen.blit(self.menu[i].surface, self.menu[i].rect)
pygame.display.update()
move = 0
while 1:
event = pygame.event.wait()
mousex, mousey = pygame.mouse.get_pos()
if event.type == QUIT:
self.quitGame()
elif event.type == MOUSEBUTTONDOWN:
move = 1
reactivate()
elif event.type == MOUSEBUTTONUP:
move = 0
deactivate()
for i in range(len(bars)):
if move == 1 and bars[i].collidepoint((mousex, mousey)):
if MIN <= mousex <= MAX:
cursors[i].centerx = mousex
elif mousex > bars[i].x + MAX_VOLUME:
cursors[i].centerx = bars[i].x + MAX_VOLUME
else:
cursors[i].centerx = bars[i].x + MIN_VOLUME
volume = cursors[i].centerx - MIN
if volume != 0:
volume = (volume / 2.25) / 100.0
assert (0.0 <= volume <= 1.0)
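                    # The cursor can travel MAX - MIN = 225 px, so dividing by
                    # 2.25 maps the offset to 0-100 and the final /100.0 yields
                    # the 0.0-1.0 range expected for mixer volumes.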
if i == 0:
self.sound.soundVolume = volume
self.sound.playPutCard()
self.sound.update()
elif i == 1:
self.sound.musicVolume = volume
self.sound.update()
self.screen.blit(self.bkgrnd, self.bkgrndRect)
self.screen.blit(bar1, bar1Rect)
self.screen.blit(bar2, bar2Rect)
self.screen.blit(cursor1, cursors[0])
self.screen.blit(cursor2, cursors[1])
for j in range(4):
self.screen.blit(self.menu[j].surface,\
self.menu[j].rect)
pygame.display.update()
self.clock.tick(self.fps)
if move and self.menu[3].rect.collidepoint((mousex, mousey)):
del bar1, bar2, bars, cursor1, cursor2, cursors
self.oldMenu()
self.sound.clicMenu.play()
return
def about(self):
page = 1
allPage = []
pageList = []
index = 0
for number in range(len(allCards)):
pageList.append(Card(number, 1))
index += 1
            if index == 3 or number == len(allCards) - 1:
allPage.append(pageList)
del pageList
pageList = []
index = 0
maxPage = len(allPage)
txtPage = str(page) + "/" + str(maxPage)
navigation = [_("Back"), _("Next"), _("Quit"),
"Programming:", "Kevin \"Ilphrin\" Pellet",
"Graphics:", "Yunero Kisapsodos",
txtPage]
navigationPos = [(80,550), (650,550), (660,40), (630, 100),
(640, 130), (630, 200), (640, 230), (350,550)]
self.menu = []
for i in range(len(navigation)):
if 2 < i < 7:
size = 12
font = "rimouski sb.ttf"
else:
font = self.FONT
size = 30
self.menu.append(Text(navigation[i], font, white, size))
self.menu[i].rect.topleft = navigationPos[i]
cardPos = [(50,50), (50,200), (50, 350)]
self.screen.blit(self.bkgrnd, self.bkgrndRect)
for element in self.menu:
self.screen.blit(element.surface,element.rect)
for elem in range(len(allPage[page-1])):
card = allPage[page-1][elem]
card.rect.topleft = cardPos[elem]
card.About.rect.topleft = card.rect.topright
for elem in allPage[page-1]:
self.screen.blit(elem.image, elem.rect)
self.screen.blit(elem.About.surface, elem.About.rect)
while 1:
self.clock.tick(self.fps)
pygame.display.flip()
event = pygame.event.wait()
if event.type == MOUSEBUTTONUP:
coords = pygame.mouse.get_pos()
for button in self.menu:
if button.rect.collidepoint(coords):
if button.text == _("Back"):
if page > 1:
page -= 1
self.sound.putcard.play()
if button.text == _("Next"):
if page < maxPage:
page += 1
self.sound.putcard.play()
if button.text == _("Quit"):
self.oldMenu()
return
txtPage = str(page) + "/" + str(maxPage)
self.menu[7] = Text(txtPage, self.FONT, white, 30)
self.menu[7].rect.topleft = navigationPos[7]
self.screen.blit(self.bkgrnd, self.bkgrndRect)
for element in self.menu:
self.screen.blit(element.surface,element.rect)
for elem in range(len(allPage[page-1])):
card = allPage[page-1][elem]
card.rect.topleft = cardPos[elem]
card.About.rect.topleft = card.rect.topright
for elem in allPage[page-1]:
self.screen.blit(elem.image, elem.rect)
self.screen.blit(elem.About.surface,
elem.About.rect)
if event.type == QUIT:
self.quitGame()
def rules(self):
tutorialButton = Button(_(u"Tutorial"), self.FONT, white)
howtoButton = Button(_(u"How To"), self.FONT, white)
backButton = Button(_(u"Back"), self.FONT, white)
tutorialButton.rect.topleft = (250, 100)
howtoButton.rect.topleft = (250, 200)
backButton.rect.topleft = (550, 500)
self.menu = []
self.menu.append(tutorialButton)
self.menu.append(howtoButton)
self.menu.append(backButton)
self.updateMenu()
while (1):
self.clock.tick(self.fps)
pygame.display.flip()
event = pygame.event.wait()
if event.type == MOUSEBUTTONUP:
coords = pygame.mouse.get_pos()
for i in range(len(self.menu)):
if self.menu[i].rect.collidepoint(coords):
self.oldMenu()
if self.menu[i].text == _(u"Tutorial"):
self.main()
elif self.menu[i].text == _(u"How To"):
self.HowTo()
return
elif self.menu[i].text == _(u"Back"):
self.main()
elif event.type == QUIT:
self.quitGame()
def HowTo(self):
backButton = Button(_("Back"), self.FONT, white)
prevButton = Button(_("Prev"), self.FONT, white)
nextButton = Button(_("Next"), self.FONT, white)
page = 1
maxPage = 2
pageList = []
for i in range(maxPage):
pageList.append(pygame.image.load(getHowTo(i)))
pageRect = pageList[i - 1].get_rect()
pageRect.topleft = (-20, 0)
backButton.rect.topleft = (600, 40)
prevButton.rect.topleft = (80, 550)
nextButton.rect.topleft = (660, 550)
self.menu = []
self.menu.append(backButton)
self.menu.append(prevButton)
self.menu.append(nextButton)
self.updateMenu()
self.screen.blit(pageList[page - 1], pageRect)
while (1):
self.clock.tick(self.fps)
pygame.display.flip()
event = pygame.event.wait()
if event.type == MOUSEBUTTONUP:
coords = pygame.mouse.get_pos()
if backButton.rect.collidepoint(coords):
self.oldMenu()
return
elif prevButton.rect.collidepoint(coords) and page > 1:
page -= 1
elif nextButton.rect.collidepoint(coords) and page < maxPage:
page += 1
self.updateMenu()
self.screen.blit(pageList[page - 1], pageRect)
elif event.type == QUIT:
self.quitGame()
def _load_translation(self):
base_path = os.getcwd()
directory = os.path.join(base_path, 'translations')
print "Loading translations at: ", directory
params = {
'domain': 'tuxle-triad',
'fallback': True
}
if os.path.isdir(directory):
params.update({'localedir': directory})
translation = gettext.translation(**params)
translation.install("ngettext")
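        # Expected catalog layout under ./translations (standard gettext
        # convention, shown here for illustration):
        #   translations/<lang>/LC_MESSAGES/tuxle-triad.mo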
def solo(self):
"""1vsIA mode"""
print "Solo!"
def adventure(self):
"""Adventure mode against IA"""
print "Adventure!"
def hotSeat(self):
"""1vs1 mode"""
if self.app != None:
del self.app
Application(800, 600, self.screen, self.sound, self).main()
else:
Application(800, 600, self.screen, self.sound, self).main()
Menu(800, 600)
| mit | 1,228,710,710,715,892,200 | 36.714953 | 78 | 0.477016 | false |
mattilyra/gensim | docs/src/conf.py | 1 | 7457 | # -*- coding: utf-8 -*-
#
# gensim documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 17 13:42:21 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
html_theme = 'gensim_theme'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.imgmath', 'sphinxcontrib.programoutput']
autoclass_content = "both"
napoleon_google_docstring = False # Disable support for google-style docstring
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'indextoc'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': './_templates/indexcontent.html'}
# General information about the project.
project = u'gensim'
copyright = u'2009-now, Radim Řehůřek <me(at)radimrehurek.com>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.4'
# The full version, including alpha/beta/rc tags.
release = '3.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# main_colour = "#ffbbbb"
html_theme_options = {
# "rightsidebar": "false",
# "stickysidebar": "true",
# "bodyfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
# "headfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
# "sidebarbgcolor": "fuckyou",
# "footerbgcolor": "#771111",
# "relbarbgcolor": "#993333",
# "sidebartextcolor": "#000000",
# "sidebarlinkcolor": "#330000",
# "codebgcolor": "#fffff0",
# "headtextcolor": "#000080",
# "headbgcolor": "#f0f0ff",
# "bgcolor": "#ffffff",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "gensim"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = ''
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {} # {'index': ['download.html', 'globaltoc.html', 'searchbox.html', 'indexsidebar.html']}
# html_sidebars = {'index': ['globaltoc.html', 'searchbox.html']}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_domain_indices = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'gensimdoc'
html_show_sphinx = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'gensim.tex', u'gensim Documentation', u'Radim Řehůřek', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
suppress_warnings = ['image.nonlocal_uri', 'ref.citation', 'ref.footnote']
| lgpl-2.1 | -371,278,304,731,654,850 | 32.868182 | 114 | 0.707153 | false |
matham/cutils | cutils/knspace.py | 1 | 15278 | '''Provides namespace functionality for Kivy objects. It allows kivy objects
to be named and then accessed using the namespace.
:class:`KNSpace` instances are the namespaces that store the named objects.
Classes need to inherit from :class:`KNSpaceBehavior` so that the class, when
named, will be stored in the namespace. :attr:`knspace` is the default
namespace where objects are stored, unless the object is associated with a
different namespace.
Simple Example
-----------------
Here, because no namespace is specified, the default
:attr:`knspace` is used so we can access its widgets directly, as in
`knspace.keyboard`, to get the keyboard widget::
#:import knspace cutils.knspace.knspace
#:import Factory kivy.lang.Factory
<NamedTextInput@KNSpaceBehavior+TextInput>
<Keyboard@Popup>:
BoxLayout:
GridLayout:
cols: 1
NamedTextInput:
name: 'keyboard'
hint_text: 'Type something'
Label:
text: 'My Keyboard'
Button:
text: 'Close Keyboard'
on_press: root.dismiss()
<RootWidget@BoxLayout>:
Button:
on_parent: self.popup = Factory.Keyboard()
on_release: self.popup.open()
text: 'Open keyboard'
Label:
text: 'Keyboard output:\\n' + knspace.keyboard.text if knspace.keyboard else ''
To test, run a app with `RootWidget`.
Multiple Namespaces
-------------------
In the previous example, only the default namespace was used. However,
sometimes we need to split namespaces so we can reuse the name across
multiple widgets using the same name.
When a :class:`KNSpaceBehavior` derived widget is given a name, first we find
the associated namespace using the :attr:`KNSpaceBehavior.knspace` property.
Then, we create a :class:`~kivy.properties.ObjectProperty` in that namespace,
whose name is that name and assign the named widget as its value. See
:attr:`KNSpaceBehavior.knspace` for details on how that namespace is found.
In short, we check if the widget was assigned one, if not, we find the
namespace by walking up its parent tree using
:attr:`KNSpaceBehavior.knspace_key` and finding the first one with a namespace.
Finally, if not found, we use :attr:`knspace`. Therefore, above, the default
namespace was used since none was specified.
::
#:import Factory kivy.lang.Factory
<NamedTextInput@KNSpaceBehavior+TextInput>
<Keyboard@KNSpaceBehavior+Popup>:
knspace_key: 'knspace_parent'
knspace_parent: None
BoxLayout:
GridLayout:
cols: 1
NamedTextInput:
name: 'keyboard'
hint_text: 'Type something'
Label:
text: 'My Keyboard'
Button:
text: 'Close Keyboard'
on_press: root.dismiss()
<Typist@KNSpaceBehavior+BoxLayout>:
knspace: getattr(self, 'knspace').clone() # So we don't create a rule binding
Button:
on_parent:
self.popup = Factory.Keyboard()
self.popup.knspace_parent = root
on_release: self.popup.open()
text: 'Open keyboard'
Label:
text: 'Keyboard output:\\n' + root.knspace.keyboard.text if root.knspace.keyboard else ''
<RootWidget@BoxLayout>:
Typist
Typist
In this example, we wanted two typists, rather than a single keyboard.
But within a typist we wanted to be able to use names, even though typists
share identical names. To do this, we have
`knspace: getattr(self, 'knspace').clone()`. This forks the current namespace
(which happens to be the default, :attr:`knspace`) and creates a namespace
shared by widgets that are offspring of that `Typist`.
Now, each `Typist` gets its own namespace, while still sharing the
default namespaces from which it was cloned for widgets not in its namespace.
`knspace_key: 'knspace_parent'` is required, since a `Popup` is not a child of
the `Typist`, but they do have to share the namespace, so instead of using
`parent` to find the next namespace up the tree, we use the specified
`knspace_parent` attribute which points to the Typist and hence its
namespace.
Traditional namespace
---------------------
In the above example, we accessed the namespace using e.g.
`root.knspace.keyboard`. We can also reach it without holding a reference to
e.g. `root`, as in a traditional namespace access.
We can change the above `RootWidget` into::
<RootWidget@KNSpaceBehavior+BoxLayout>:
name: 'root'
Typist
Typist
Now, we can do::
knspace.root.children[0].knspace.keyboard.hint_text = 'Type something else'
And the second Typist's keyboard will have a different hint text. Of course
we could also have done
`root.children[0].knspace.keyboard.hint_text = 'Type something else'` if we had
access to the root widget.
'''
__all__ = ('KNSpace', 'KNSpaceBehavior', 'knspace')
from kivy.event import EventDispatcher
from kivy.properties import StringProperty, ObjectProperty, AliasProperty
from kivy.lang import Factory
knspace = None
'''The default :class:`KNSpace` namespace. If a :class:`KNSpace` namespace has
not been assigned to a :class:`KNSpaceBehavior` instance, then this
:class:`KNSpace` namespace serves as the default namespace.
See the examples and :class:`KNSpaceBehavior` for more details.
'''
class KNSpace(EventDispatcher):
'''Each :class:`KNSpace` instance is a namespace that stores the named Kivy
objects when they are associated with this namespace. Each named object is
stored as the value of a Kivy :class:`~kivy.properties.ObjectProperty` of
this instance whose property name is the object's given name. Both `rebind`
and `allownone` are set to `True` for the property.
See :attr:`KNSpaceBehavior` for details on how a namespace is associated
with a named object.
When storing an object in the namespace, the object's `proxy_ref` is
stored if the object has such an attribute.
:Parameters:
`parent`: (internal) A :class:`KNSpace` instance or None.
If specified, it's a parent namespace, in which case, the current
namespace will have in its namespace all its named objects
as well as the named objects of its parent and parent's parent
etc. See :meth:`clone` for more details.
'''
parent = None
'''(internal) The parent namespace instance, :class:`KNSpace`, or None. See
:meth:`clone`.
'''
__has_applied = None
def __init__(self, parent=None, **kwargs):
super(KNSpace, self).__init__(**kwargs)
self.parent = parent
self.__has_applied = set(self.properties().keys())
def __setattr__(self, name, value):
prop = super(KNSpace, self).property(name, quiet=True)
has_applied = self.__has_applied
if prop is None:
if hasattr(self, name):
super(KNSpace, self).__setattr__(name, value)
else:
value = getattr(value, 'proxy_ref', value)
self.apply_property(
**{name:
ObjectProperty(value, rebind=True, allownone=True)}
)
has_applied.add(name)
elif name not in has_applied:
self.apply_property(**{name: prop})
has_applied.add(name)
value = getattr(value, 'proxy_ref', value)
super(KNSpace, self).__setattr__(name, value)
else:
value = getattr(value, 'proxy_ref', value)
super(KNSpace, self).__setattr__(name, value)
def __getattr__(self, name):
parent = self.parent
if parent is None:
raise AttributeError(name)
return getattr(parent, name)
def property(self, name, quiet=False):
# needs to overwrite EventDispatcher.property so kv lang will work
prop = super(KNSpace, self).property(name, quiet=quiet)
if prop is not None:
return prop
prop = ObjectProperty(None, rebind=True, allownone=True)
self.apply_property(**{name: prop})
self.__has_applied.add(name)
return prop
def clone(self):
'''Creates a new :class:`KNSpace` instance which will have access to
all the named objects in the current namespace but will also have a
namespace of its own that is unique to it.
Any new names added to a :class:`KNSpaceBehavior` associated with
        this instance will be accessible only through this instance
and not its parent(s). However, when looking for a named object using
this namespace, if the object is not found in this namespace we search
        its parent namespace and so on until we find it (or don't).
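
        A minimal sketch of the intended use (the names below are only
        illustrative, not part of the API)::

            child = knspace.clone()
            child.local_widget = some_widget  # visible through ``child`` only
            child.keyboard                    # falls back to the parent lookup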
'''
return KNSpace(parent=self)
class KNSpaceBehavior(object):
'''Inheriting from this class allows naming of the inherited object, which
is then added to the associated namespace :attr:`knspace` and accessible
through it.
'''
_knspace = ObjectProperty(None, allownone=True)
_name = StringProperty('')
__last_knspace = None
__callbacks = None
def __init__(self, knspace=None, **kwargs):
self.knspace = knspace
super(KNSpaceBehavior, self).__init__(**kwargs)
def __knspace_clear_callbacks(self, *largs):
for obj, name, uid in self.__callbacks:
obj.unbind_uid(name, uid)
last = self.__last_knspace
self.__last_knspace = self.__callbacks = None
assert self._knspace is None
assert last
new = self.__set_parent_knspace()
if new is last:
return
self.property('_knspace').dispatch(self)
name = self.name
if not name:
return
if getattr(last, name) == self:
setattr(last, name, None)
if new:
setattr(new, name, self)
else:
raise ValueError('Object has name "{}", but no namespace'.
format(name))
def __set_parent_knspace(self):
callbacks = self.__callbacks = []
fbind = self.fbind
append = callbacks.append
parent_key = self.knspace_key
clear = self.__knspace_clear_callbacks
append((self, 'knspace_key', fbind('knspace_key', clear)))
if not parent_key:
self.__last_knspace = knspace
return knspace
append((self, parent_key, fbind(parent_key, clear)))
parent = getattr(self, parent_key, None)
while parent is not None:
fbind = parent.fbind
parent_knspace = getattr(parent, 'knspace', 0)
if parent_knspace is not 0:
append((parent, 'knspace', fbind('knspace', clear)))
self.__last_knspace = parent_knspace
return parent_knspace
append((parent, parent_key, fbind(parent_key, clear)))
new_parent = getattr(parent, parent_key, None)
if new_parent is parent:
break
parent = new_parent
self.__last_knspace = knspace
return knspace
def _get_knspace(self):
_knspace = self._knspace
if _knspace is not None:
return _knspace
if self.__callbacks is not None:
return self.__last_knspace
# we only get here if we never accessed our knspace
return self.__set_parent_knspace()
def _set_knspace(self, value):
if value is self._knspace:
return
knspace = self._knspace or self.__last_knspace
name = self.name
if name and knspace:
setattr(knspace, name, None) # reset old namespace
if value == 'clone':
if not knspace:
knspace = self.knspace # get parents in case we haven't before
if knspace:
value = knspace.clone()
else:
raise ValueError('Cannot clone with no namesapce')
for obj, prop_name, uid in self.__callbacks or []:
obj.unbind_uid(prop_name, uid)
self.__last_knspace = self.__callbacks = None
if name:
if value is None: # if None, first update the recursive knspace
knspace = self.__set_parent_knspace()
if knspace:
setattr(knspace, name, self)
self._knspace = None # cause a kv trigger
else:
setattr(value, name, self)
knspace = self._knspace = value
if not knspace:
raise ValueError('Object has name "{}", but no namespace'.
format(name))
else:
if value is None:
self.__set_parent_knspace() # update before trigger below
self._knspace = value
knspace = AliasProperty(
_get_knspace, _set_knspace, bind=('_knspace', ), cache=False,
rebind=True, allownone=True)
'''The namespace instance, :class:`KNSpace`, associated with this widget.
When this widget is named with :attr:`name` the name is added to the
:attr:`knspace` namespace pointing to this widget.
If the namespace has been set with a :class:`KNSpace` instance, e.g. with
`self.knspace = ...`, then that instance is used. Otherwise, we look at
the property named :attr:`knspace_key` of this obj. If that object has a
knspace property we use that namespace. Otherwise, we look at its
:attr:`knspace_key` object and walk up the parent tree until we find
a parent who has a namespace instance. Finally, if there's no parent with
a namespace, the default :attr:`~cutils.knspace.knspace` namespace is used.
Both `rebind` and `allownone` are `True`.
'''
knspace_key = StringProperty('parent', allownone=True)
'''The name of the property of this instance, to use to find the namespace
associated with this instance. Defaults to `'parent'` so that we'll look
up the parent tree to find the namespace. See :attr:`knspace`.
When `None`, we won't search the parent tree for the namespace.
`allownone` is `True`.
'''
def _get_name(self):
return self._name
def _set_name(self, value):
old_name = self._name
knspace = self.knspace
if old_name and knspace:
setattr(knspace, old_name, None)
self._name = value
if value:
if knspace:
setattr(knspace, value, self)
else:
raise ValueError('Object has name "{}", but no namespace'.
format(value))
name = AliasProperty(_get_name, _set_name, bind=('_name', ), cache=False)
'''The name given to this object. If named, the name will be added to the
associated :attr:`knspace` and will point to the `proxy_ref` of this
object.
When named, one can access this object by e.g. knspace.name, where `name`
is the given name of this instance. See :attr:`knspace` and the module
description for more details.
'''
knspace = KNSpace()
Factory.register('KNSpaceBehavior', cls=KNSpaceBehavior)
| mit | 1,435,365,778,809,909,800 | 35.63789 | 101 | 0.623969 | false |
iNecas/katello | cli/test/katello/tests/core/product/product_promote_test.py | 1 | 3293 | import unittest
from mock import Mock
import os
from katello.tests.core.action_test_utils import CLIOptionTestCase, CLIActionTestCase
from katello.tests.core.organization import organization_data
from katello.tests.core.product import product_data
from katello.tests.core.provider import provider_data
from katello.tests.core.repo import repo_data
import katello.client.core.product
from katello.client.core.product import Promote
from katello.client.api.utils import ApiDataError
class RequiredCLIOptionsTests(CLIOptionTestCase):
action = Promote()
disallowed_options = [
('--org=ACME', '--name=product_1'),
]
allowed_options = [
('--org=ACME', '--name=product_1', '--environment=env_1')
]
class ProductPromoteTest(CLIActionTestCase):
ORG = organization_data.ORGS[0]
ENV = organization_data.ENVS[0]
PROV = provider_data.PROVIDERS[2]
PROD = product_data.PRODUCTS[0]
CSET = product_data.EMPTY_CHANGESET
TMP_CHANGESET_NAME = 'tmp_changeset_name'
TYPE = 'PROMOTION'
OPTIONS = {
'org': ORG['name'],
'name': PROD['name'],
'environment': ENV['name']
}
def setUp(self):
self.set_action(Promote())
self.set_module(katello.client.core.product)
self.mock_printer()
self.mock_options(self.OPTIONS)
self.mock(self.action.csapi, 'create', self.CSET)
self.mock(self.action.csapi, 'add_content')
self.mock(self.action.csapi, 'apply', repo_data.SYNC_RESULT_WITHOUT_ERROR)
self.mock(self.action.csapi, 'delete')
self.mock(self.action, 'create_cs_name', self.TMP_CHANGESET_NAME)
self.mock(self.module, 'get_environment', self.ENV)
self.mock(self.module, 'get_product', self.PROD)
self.mock(self.module, 'run_spinner_in_bg', repo_data.SYNC_RESULT_WITHOUT_ERROR)
def tearDown(self):
self.restore_mocks()
def test_it_finds_the_environment(self):
self.run_action()
self.module.get_environment.assert_called_once_with(self.ORG['name'], self.ENV['name'])
def test_it_returns_with_error_when_no_environment_found(self):
self.mock(self.module, 'get_environment').side_effect = ApiDataError()
self.run_action(os.EX_DATAERR)
def test_it_finds_the_product(self):
self.run_action()
self.module.get_product.assert_called_once_with(self.ORG['name'], self.PROD['name'])
def test_it_returns_with_error_when_no_product_found(self):
self.mock(self.module, 'get_product').side_effect = ApiDataError()
self.run_action(os.EX_DATAERR)
def test_it_creates_new_changeset(self):
self.run_action()
self.action.csapi.create.assert_called_once_with(self.ORG['name'], self.ENV['id'], self.TMP_CHANGESET_NAME, self.TYPE)
def test_it_updates_the_changeset(self):
self.run_action()
self.action.csapi.add_content.assert_called_once_with(self.CSET['id'], 'products',
{'product_id': self.PROD['id']})
def test_it_promotes_the_changeset(self):
self.run_action()
self.action.csapi.apply.assert_called_once_with(self.CSET['id'])
def test_waits_for_promotion(self):
self.run_action()
self.module.run_spinner_in_bg.assert_called_once()
| gpl-2.0 | -6,475,309,587,878,731,000 | 32.262626 | 126 | 0.668084 | false |
takashi-suehiro/rtmtools | rtc_handle_example/rtc_handle/rtc_handle_1.0.py | 1 | 16845 | #/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
# import sys
import string
from omniORB import CORBA, URI
# from omniORB import any
from omniORB import any, cdrMarshal, cdrUnmarshal
import OpenRTM_aist
import RTC
from CorbaNaming import *
import SDOPackage
# from EmbryonicRtc import *
# class RtmEnv :
# rtm environment manager
# orb, naming service, rtc proxy list
#
class RtmEnv :
def __init__(self, orb_args, nserver_names=["localhost"],
orb=None, naming=None):
if not orb :
orb = CORBA.ORB_init(orb_args)
self.orb = orb
self.name_space = {}
if naming : # naming can specify only one naming service
self.name_space['default']=NameSpace(orb, naming=naming)
else :
for ns in nserver_names :
self.name_space[ns]=NameSpace(orb, server_name=ns)
def __del__(self):
self.orb.shutdown(wait_for_completion=CORBA.FALSE)
self.orb.destroy()
#
# class NameSpace :
# rtc_handles and object list in naming service
#
class NameSpace :
def __init__(self, orb, server_name=None, naming=None):
self.orb = orb
self.name = server_name
if naming :
self.naming = naming
else :
self.naming = CorbaNaming(self.orb, server_name)
self.b_len = 10 # iteration cut off no.
self.rtc_handles = {}
self.obj_list = {}
def get_object_by_name(self, name, cl=RTC.RTObject):
ref = self.naming.resolveStr(name)
if ref is None: return None # return CORBA.nil ?
if cl :
return ref._narrow(cl)
else :
return ref
def list_obj(self) :
self.rtc_handles = {}
self.obj_list = {}
return self.list_obj1(self.naming._rootContext, "")
def list_obj1(self, name_context, parent) :
if not name_context :
name_context = self.naming._rootContext
rslt = []
b_list = name_context.list(self.b_len)
for bd in b_list[0] :
rslt = rslt + self.proc_bd(bd, name_context, parent)
if b_list[1] : # iterator : there exists remaining.
t_list = b_list[1].next_n(self.b_len)
while t_list[0] :
for bd in t_list[1] :
rslt = rslt + self.proc_bd(bd, name_context, parent)
t_list = b_list[1].next_n(self.b_len)
return rslt
def proc_bd(self, bd, name_context, parent) :
# print '-------------------------------------------------------------------'
# print 'bd= ', bd
# print 'name_context= ', name_context
# print 'parent= ', parent
rslt = []
pre = ""
if parent :
pre = parent + "/"
nam = pre + URI.nameToString(bd.binding_name)
if bd.binding_type == CosNaming.nobject :
tmp = name_context.resolve(bd.binding_name)
self.obj_list[nam]=tmp
print 'object '+nam+' was listed.'
try :
tmp = tmp._narrow(RTC.RTObject)
except :
print nam+' is not RTC.'
tmp = None
try :
if tmp :
rslt = [[nam, tmp]]
self.rtc_handles[nam]=RtcHandle(nam,self,tmp)
print 'handle for '+nam+' was created.'
else :
pass
except :
print nam+' is not alive.'
pass
else :
tmp = name_context.resolve(bd.binding_name)
tmp = tmp._narrow(CosNaming.NamingContext)
rslt = self.list_obj1(tmp, nam)
return rslt
#
# data conversion
#
def nvlist2dict(nvlist) :
rslt = {}
for tmp in nvlist :
rslt[tmp.name]=tmp.value.value() # nv.value and any.value()
return rslt
def dict2nvlist(dict) :
rslt = []
for tmp in dict.keys() :
rslt.append(SDOPackage.NameValue(tmp, any.to_any(dict[tmp])))
return rslt
#
# connector, port, inport, outport, service
#
class Connector :
def __init__(self, plist, name = None, id="", prop_dict={}) :
self.connectp=False
self.plist = plist
self.port_reflist = [tmp.port_profile.port_ref for tmp in plist]
if name :
self.name = name
else :
self.name = '_'.join([tmp.name for tmp in plist])
self.prop_dict_req = prop_dict
self.prop_nvlist_req = dict2nvlist(self.prop_dict_req)
self.profile_req = RTC.ConnectorProfile(self.name, id, self.port_reflist,
self.prop_nvlist_req)
self.nego_prop()
def nego_prop(self) :
self.possible = True
for kk in self.def_prop :
if kk in self.prop_dict_req :
if not self.prop_dict_req[kk] :
self.prop_dict_req[kk]=self.def_prop[kk]
else :
self.prop_dict_req[kk]=self.def_prop[kk]
for pp in self.plist :
if not ((self.prop_dict_req[kk] in pp.prop[kk]) or
('Any' in pp.prop[kk])) :
print kk, self.prop_dict_req[kk]
self.prop_dict_req[kk] = ""
self.possible = False
self.prop_nvlist_req = dict2nvlist(self.prop_dict_req)
self.profile_req.properties = self.prop_nvlist_req
return self.possible
def connect(self) :
#
# out and inout parameters are returned as a tuple
#
if self.connectp == False :
ret, self.profile = self.port_reflist[0].connect(self.profile_req)
self.prop_nvlist = self.profile.properties
self.prop_dict = nvlist2dict(self.prop_nvlist)
if ret == RTC.RTC_OK :
self.connectp=True
else :
ret = "?"
return ret
def disconnect(self) :
if self.connectp == True :
ret = self.port_reflist[0].disconnect(self.profile.connector_id)
else :
ret = "?"
self.connectp = False
return ret
class IOConnector(Connector) :
def __init__(self, plist, name = None, id="", prop_dict={}) :
# self.def_prop = {'dataport.dataflow_type':'Push' ,
# 'dataport.interface_type':'CORBA_Any' ,
# 'dataport.subscription_type':'Flush'}
self.def_prop = {'dataport.dataflow_type':'push',
'dataport.interface_type':'corba_cdr' ,
'dataport.subscription_type':'flush'}
Connector.__init__(self, plist, name, id, prop_dict)
class ServiceConnector(Connector) :
def __init__(self, plist, name = None, id="", prop_dict={}) :
self.def_prop = {'port.port_type':'CorbaPort' }
Connector.__init__(self, plist, name, id, prop_dict)
class Port :
def __init__(self, profile,nv_dict=None,handle=None) :
self.handle=handle
self.name=profile.name
self.port_profile = profile
if not nv_dict :
nv_dict = nvlist2dict(profile.properties)
self.prop = nv_dict
self.con = None # this must be set in each subclasses
def get_info(self) :
self.con.connect()
tmp1 = self.get_connections()
tmp2 = [pp.connector_id for pp in tmp1]
if self.con.profile.connector_id in tmp2 :
print "connecting"
self.con.disconnect()
def get_connections(self) :
return self.port_profile.port_ref.get_connector_profiles()
class CorbaServer :
def __init__(self, profile, port) :
self.profile = profile
self.port = port
self.name = profile.instance_name
self.type = profile.type_name
self.ref = None
ref_key = 'port.' + self.type + '.' + self.name
self.ref=self.port.con.prop_dict[ref_key]
if isinstance(self.ref,str) :
self.ref=port.handle.env.orb.string_to_object(self.ref)
#
# if we import stubs before we create instances,
# we rarely need to narrow the object references.
# we need to specify global symbol table to evaluate class symbols.
#
def narrow_ref(self, gls) :
if self.type.find('::') == -1 :
self.narrow_sym = eval('_GlobalIDL.' + self.type, gls)
else :
self.narrow_sym = eval(self.type.replace('::','.'), gls)
self.ref = self.ref._narrow(self.narrow_sym)
class CorbaClient :
def __init__(self, profile) :
self.profile = profile
self.name = profile.instance_name
self.type = profile.type_name
#
# to connect to an outside corba client,
# we need an implementation of the corresponding corba server.
# but ....
#
class RtcService(Port) :
def __init__(self, profile,nv_dict=None, handle=None) :
Port.__init__(self, profile, nv_dict, handle)
self.con = ServiceConnector([self])
self.get_info()
self.provided={}
self.required={}
tmp = self.port_profile.interfaces
for itf in tmp :
if itf.polarity == RTC.PROVIDED :
self.provided[itf.instance_name] = CorbaServer(itf,self)
elif itf.polarity == RTC.REQUIRED :
self.required[itf.instance_name] = CorbaClient(itf)
# def open(self) :
# self.con.connect()
# self.provided={}
# self.required={}
# tmp = self.port_profile.interfaces
# for itf in tmp :
# if itf.polarity == RTC.PROVIDED :
# self.provided[itf.instance_name] = CorbaServer(itf,self)
# elif itf.polarity == RTC.REQUIRED :
# self.required[itf.instance_name] = CorbaClient(itf)
# def close(self) :
# return self.con.disconnect()
class RtcInport(Port) :
def __init__(self, profile, nv_dict=None, handle=None) :
Port.__init__(self, profile, nv_dict, handle)
self.con = IOConnector([self], prop_dict={'dataport.dataflow_type':'push'})
self.get_info()
# self.ref = self.con.prop_dict['dataport.corba_any.inport_ref']
self.ref = self.con.prop_dict['dataport.corba_cdr.inport_ref']
self.data_class = eval('RTC.' + self.prop['dataport.data_type'])
self.data_tc = eval('RTC._tc_' + self.prop['dataport.data_type'])
def write(self,data) :
# self.ref.put(CORBA.Any(self.data_tc,
# self.data_class(RTC.Time(0,0),data)))
self.ref.put(cdrMarshal(self.data_tc,
self.data_class(RTC.Time(0,0),data), 1))
def open(self) :
self.con.connect()
self.ref = self.con.prop_dict['dataport.corba_cdr.inport_ref']
def close(self) :
return self.con.disconnect()
class RtcOutport(Port) :
def __init__(self, profile,nv_dict=None, handle=None) :
Port.__init__(self, profile, nv_dict, handle)
con_prop_dict={'dataport.dataflow_type':'pull',
'dataport.buffer.type':'ringbuffer',
'dataport.buffer.read.empty_policy':'last',
'dataport.buffer.length':'1'}
self.con = IOConnector([self], prop_dict=con_prop_dict)
self.get_info()
# if 'dataport.corba_any.outport_ref' in self.con.prop_dict :
# self.ref = self.con.prop_dict['dataport.corba_any.outport_ref']
if 'dataport.corba_cdr.outport_ref' in self.con.prop_dict :
self.ref = self.con.prop_dict['dataport.corba_cdr.outport_ref']
else :
self.ref=None
self.data_class = eval('RTC.' + self.prop['dataport.data_type'])
self.data_tc = eval('RTC._tc_' + self.prop['dataport.data_type'])
def read(self) :
if self.ref :
try :
tmp1=self.ref.get()
tmp2= cdrUnmarshal(self.data_tc,tmp1[1], 1)
# return tmp2.data
return tmp2
except :
return None
else :
print "not supported"
return None
def open(self) :
self.con.connect()
if 'dataport.corba_cdr.outport_ref' in self.con.prop_dict :
self.ref = self.con.prop_dict['dataport.corba_cdr.outport_ref']
def close(self) :
return self.con.disconnect()
#
# RtcHandle
#
class RtcHandle :
def __init__(self, name, env, ref=None) :
self.name = name
self.env = env
if ref :
self.rtc_ref = ref
else :
self.rtc_ref = env.naming.resolve(name)._narrow(RTC.RTObject)
self.conf_ref = None
self.retrieve_info()
def retrieve_info(self) :
self.conf_set={}
self.conf_set_data={}
self.port_refs = []
self.execution_contexts =[]
if self.rtc_ref :
self.conf_ref = self.rtc_ref.get_configuration()
conf_set = self.conf_ref.get_configuration_sets()
for cc in conf_set :
self.conf_set[cc.id]=cc
self.conf_set_data[cc.id]=nvlist2dict(cc.configuration_data)
self.profile = self.rtc_ref.get_component_profile()
self.prop = nvlist2dict(self.profile.properties)
#self.execution_contexts = self.rtc_ref.get_contexts()
self.execution_contexts = self.rtc_ref.get_owned_contexts()
self.port_refs = self.rtc_ref.get_ports()
# this includes inports, outports and service ports
self.ports = {}
self.services = {}
self.inports = {}
self.outports = {}
for pp in self.port_refs :
tmp = pp.get_port_profile()
tmp_prop = nvlist2dict(tmp.properties)
tmp_name = tmp.name.lstrip(self.name.split('.')[0]).lstrip('.')
print 'port_name:', tmp_name
# self.ports[tmp.name]=Port(tmp, tmp_prop)
if tmp_prop['port.port_type']=='DataInPort' :
self.inports[tmp_name]=RtcInport(tmp,tmp_prop, self)
# self.inports[tmp.name]=Port(tmp, tmp_prop)
elif tmp_prop['port.port_type']=='DataOutPort' :
self.outports[tmp_name]=RtcOutport(tmp, tmp_prop, self)
# self.outports[tmp.name]=Port(tmp, tmp_prop)
elif tmp_prop['port.port_type']=='CorbaPort' :
self.services[tmp_name]=RtcService(tmp, tmp_prop, self)
# self.services[tmp.name]=Port(tmp, tmp_prop)
def set_conf(self,conf_set_name,param_name,value) :
conf_set=self.conf_set[conf_set_name]
conf_set_data=self.conf_set_data[conf_set_name]
conf_set_data[param_name]=value
conf_set.configuration_data=dict2nvlist(conf_set_data)
# self.conf_ref.set_configuration_set_values(conf_set_name,conf_set)
self.conf_ref.set_configuration_set_values(conf_set)
def set_conf_activate(self,conf_set_name,param_name,value) :
self.set_conf(conf_set_name,param_name,value)
self.conf_ref.activate_configuration_set(conf_set_name)
def activate(self):
return self.execution_contexts[0].activate_component(self.rtc_ref)
def deactivate(self):
return self.execution_contexts[0].deactivate_component(self.rtc_ref)
def reset(self):
return self.execution_contexts[0].reset_component(self.rtc_ref)
def get_state(self):
return self.execution_contexts[0].get_component_state(self.rtc_ref)
#
# pipe
# a pipe is a port (interface & implementation)
# for a port(an RtcInport or RtcOutport object) of an outside rtc.
# you need an empty rtc (comp) to create pipes.
# you can subscribe and communicate to the outside port with the pipe.
#
#
class InPipe() :
def __init__(self,comp, port) :
self.comp=comp
self.port=port
self.pname=port.name.replace('.','_')
self.pipe=comp.makeOutPort(self.pname,port.data_class(RTC.Time(0,0),[]),OpenRTM_aist.RingBuffer(1))
self.buf=getattr(comp,'_d_'+self.pname)
tmp = self.pipe.getPortProfile()
self.pipe_port = RtcOutport(tmp, nvlist2dict(tmp.properties))
self.con = IOConnector([self.pipe_port,self.port])
def connect(self):
return self.con.connect()
def disconnect(self):
return self.con.disconnect()
def write(self, data) :
self.buf.data=data
self.pipe.write()
class OutPipe() :
def __init__(self,comp, port) :
self.comp=comp
self.port=port
self.pname=port.name.replace('.','_')
self.pipe=comp.makeInPort(self.pname,port.data_class(RTC.Time(0,0),[]),OpenRTM_aist.RingBuffer(1))
self.buf=getattr(comp,'_d_'+self.pname)
tmp = self.pipe.getPortProfile()
self.pipe_port = RtcInport(tmp, nvlist2dict(tmp.properties))
self.con = IOConnector([self.pipe_port,self.port])
def connect(self):
return self.con.connect()
def disconnect(self):
return self.con.disconnect()
def read(self) :
return self.pipe.read().data
#
#
#
def make_pipe(comp, handle) :
handle.in_pipe={}
for i_port in handle.inports :
handle.in_pipe[i_port]=InPipe(comp, handle.inports[i_port])
handle.out_pipe={}
for o_port in handle.outports :
handle.out_pipe[o_port]=OutPipe(comp, handle.outports[o_port])
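#
# minimal usage sketch, assuming a naming service on "localhost", an RTC
# registered there as "ConsoleOut0.rtc" with a data port named "in" (both
# placeholder names), and "comp" being an empty component such as the
# EmbryonicRtc referenced above:
#
# env = RtmEnv([], ["localhost"])
# ns = env.name_space["localhost"]
# ns.list_obj() # populate ns.rtc_handles
# h = ns.rtc_handles["ConsoleOut0.rtc"]
# h.activate() # drive it through its execution context
# h.inports["in"].open() # connect directly to the data port ...
# h.inports["in"].write([1, 2, 3]) # ... and push a sample through it
# h.inports["in"].close()
# make_pipe(comp, h) # or subscribe via pipes on the empty rtc
# h.in_pipe["in"].connect()
# h.in_pipe["in"].write([1, 2, 3])
#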
| mit | -8,554,086,833,814,778,000 | 34.840426 | 103 | 0.581656 | false |
nikdoof/django-eveigb | test_project/settings.py | 1 | 5384 | # Django settings for test_project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.db3', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '29)2ec_!4fy$mb0c+u7sz5-q84@tjp(b!atfh-3v@0^c9c=do*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'test_project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'eveigb',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| bsd-3-clause | 2,804,299,625,662,578,700 | 33.292994 | 127 | 0.685921 | false |
eayunstack/oslo.messaging | tests/utils.py | 1 | 2075 | # Copyright 2010-2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common utilities used in testing"""
import six
from oslo.config import cfg
from oslotest import base
from oslotest import moxstubout
TRUE_VALUES = ('true', '1', 'yes')
class BaseTestCase(base.BaseTestCase):
def setUp(self, conf=cfg.CONF):
super(BaseTestCase, self).setUp()
from oslo.messaging import conffixture
self.messaging_conf = self.useFixture(conffixture.ConfFixture(conf))
self.messaging_conf.transport_driver = 'fake'
self.conf = self.messaging_conf.conf
moxfixture = self.useFixture(moxstubout.MoxStubout())
self.mox = moxfixture.mox
self.stubs = moxfixture.stubs
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the tearDown() method.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
self.conf.set_override(k, v, group)
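#
# minimal usage sketch of the fixture above, assuming a hypothetical test
# case; "rpc_response_timeout" is used here only as an example of an
# existing oslo.messaging option name:
#
# class FakeDriverTestCase(BaseTestCase):
#
#     def test_timeout_override(self):
#         self.config(rpc_response_timeout=5)
#         self.assertEqual(5, self.conf.rpc_response_timeout)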
| apache-2.0 | 8,998,579,657,641,622,000 | 33.583333 | 78 | 0.701687 | false |
NinjaMSP/crossbar | crossbar/router/test/test_testament.py | 2 | 8815 | #####################################################################################
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.
# you have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import
import six
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from .helpers import make_router_and_realm, connect_application_session
from crossbar._logging import LogCapturer
class TestamentTests(unittest.TestCase):
def setUp(self):
self.logs = LogCapturer()
self.logs.__enter__()
self.addCleanup(lambda: self.logs.__exit__(None, None, None))
def test_destroy_testament_sent_on_destroy(self):
"""
If one session calls wamp.session.add_testament and then the session is
destroyed, the message it filed as a testament will be sent to
subscribers of the chosen topic.
"""
router, server_factory, router_factory = make_router_and_realm()
class ObservingSession(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
self.events = []
self.s = yield self.subscribe(
lambda *a, **kw: self.events.append({'args': a, 'kwargs': kw}),
u'com.test.destroyed')
session, pump = connect_application_session(server_factory,
ApplicationSession)
ob_session, ob_pump = connect_application_session(server_factory,
ObservingSession)
d = session.call(u"wamp.session.add_testament", u"com.test.destroyed",
[u'hello'], {})
pump.flush()
# Make sure it returns a publication ID
self.assertIsInstance(self.successResultOf(d), six.integer_types)
# No testament sent yet
pump.flush()
ob_pump.flush()
self.assertEqual(ob_session.events, [])
# Then leave...
session.leave()
pump.flush()
ob_pump.flush()
# Testament is sent
self.assertEqual(ob_session.events,
[{'args': (u"hello",), 'kwargs': {}}])
def test_destroy_testament_not_sent_when_cleared(self):
"""
If one session calls wamp.session.add_testament, then the same session
calls wamp.session.flush_testaments, and then the session is destroyed,
the message it filed as a testament will not be sent, as it was
deleted.
"""
router, server_factory, router_factory = make_router_and_realm()
class ObservingSession(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
self.events = []
self.s = yield self.subscribe(
lambda *a, **kw: self.events.append({'args': a, 'kwargs': kw}),
u'com.test.destroyed')
session, pump = connect_application_session(server_factory,
ApplicationSession)
ob_session, ob_pump = connect_application_session(server_factory,
ObservingSession)
d = session.call(u"wamp.session.add_testament", u"com.test.destroyed",
[u'hello'], {})
pump.flush()
# Make sure it returns an integer (the testament event publication ID)
self.assertIsInstance(self.successResultOf(d), six.integer_types)
# No testament sent yet
pump.flush()
ob_pump.flush()
self.assertEqual(ob_session.events, [])
# Flush the testament
d = session.call(u"wamp.session.flush_testaments")
pump.flush()
# Make sure it returns flushed count 1
self.assertEqual(self.successResultOf(d), 1)
# Then leave...
session.leave()
pump.flush()
ob_pump.flush()
# No testaments were sent
self.assertEqual(ob_session.events, [])
def test_add_testament_needs_valid_scope(self):
"""
Only 'detatched' and 'destroyed' are valid scopes for add_testament.
"""
router, server_factory, router_factory = make_router_and_realm()
session, pump = connect_application_session(server_factory,
ApplicationSession)
d = session.call(u"wamp.session.add_testament", u"com.test.destroyed",
[u'hello'], {}, scope=u"bar")
pump.flush()
# Make sure it returns a failure
failure = self.failureResultOf(d)
self.assertEqual(failure.value.args,
(u"scope must be destroyed or detatched",))
def test_flush_testament_needs_valid_scope(self):
"""
Only 'detatched' and 'destroyed' are valid scopes for flush_testaments.
"""
router, server_factory, router_factory = make_router_and_realm()
session, pump = connect_application_session(server_factory,
ApplicationSession)
d = session.call(u"wamp.session.flush_testaments", scope=u"bar")
pump.flush()
# Make sure it returns a failure
failure = self.failureResultOf(d)
self.assertEqual(failure.value.args,
(u"scope must be destroyed or detatched",))
def test_one_scope_does_not_affect_other(self):
"""
Adding a testament to one scope and flushing the other maintains the
added testament.
"""
router, server_factory, router_factory = make_router_and_realm()
class ObservingSession(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
self.events = []
self.s = yield self.subscribe(
lambda *a, **kw: self.events.append({'args': a, 'kwargs': kw}),
u'com.test.dc')
session, pump = connect_application_session(server_factory,
ApplicationSession)
ob_session, ob_pump = connect_application_session(server_factory,
ObservingSession)
# Add a destroyed testament
d = session.call(u"wamp.session.add_testament", u"com.test.dc",
[u'destroyed'], {}, scope=u"destroyed")
pump.flush()
self.assertIsInstance(self.successResultOf(d), six.integer_types)
# Add a detatched testament
d = session.call(u"wamp.session.add_testament", u"com.test.dc",
[u'detatched'], {}, scope=u"detatched")
pump.flush()
self.assertIsInstance(self.successResultOf(d), six.integer_types)
# No testament sent yet
pump.flush()
ob_pump.flush()
self.assertEqual(ob_session.events, [])
# Flush the destroyed testament
d = session.call(u"wamp.session.flush_testaments", scope=u"destroyed")
pump.flush()
# Make sure it returns number of flushed testaments
self.assertEqual(self.successResultOf(d), 1)
# Then leave...
session.leave()
pump.flush()
ob_pump.flush()
# Just the detatched testament is sent
self.assertEqual(ob_session.events, [{"args": (u'detatched',), "kwargs": {}}])
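#
# for reference, a minimal client-side sketch of the meta procedure exercised
# above, assuming an autobahn ApplicationSession and a made-up topic name:
#
# class PresenceSession(ApplicationSession):
#
#     @inlineCallbacks
#     def onJoin(self, details):
#         yield self.call(u"wamp.session.add_testament",
#                         u"com.example.presence", [u"gone"], {},
#                         scope=u"destroyed")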
| agpl-3.0 | 2,491,934,011,154,283,500 | 36.67094 | 86 | 0.586954 | false |
prashrock/Python | leetCode/largest_number/create_largest_number_from_array.py | 1 | 1340 | # Use a custom sort comparator to sort the integers
# Converted the sorted integer array into a string
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
# @param x, first integer
# @param y, second integer
# @return (xy - yx)
def cmp_aggregate(x, y):
str_xy = ''.join((str(x), str(y)))
str_yx = ''.join((str(y), str(x)))
return int(str_xy) - int(str_yx)
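# worked example: cmp_aggregate(3, 30) compares int('330') - int('303') = 27,
# so 3 ranks above 30 and the descending sort below produces '330', not '303'.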
#Sort with a custom comparator and get descending order
def largestNumber(num):
sorted_num = sorted(num, key=cmp_to_key(cmp_aggregate), reverse=True)
print sorted_num
sorted_str = ''.join(map(str, sorted_num))
if(int(sorted_str) == 0): return '0'
else: return sorted_str
num = [3, 30, 34, 5, 9]
print num
print largestNumber(num)
| gpl-2.0 | 4,418,024,888,891,023,400 | 31.682927 | 73 | 0.581343 | false |
emulbreh/lymph | lymph/core/events.py | 1 | 3099 | import re
import logging
from lymph.core.interfaces import Component
from lymph.core import trace
logger = logging.getLogger(__name__)
class Event(object):
def __init__(self, evt_type, body, source=None, headers=None, event_id=None):
self.event_id = event_id
self.evt_type = evt_type
self.body = body
self.source = source
self.headers = headers or {}
def __getitem__(self, key):
return self.body[key]
def __iter__(self):
return iter(self.body)
def __repr__(self):
return '<Event type=%r body=%r>' % (self.evt_type, self.body)
def __str__(self):
return '{type=%s id=%s}' % (self.evt_type, self.event_id)
@classmethod
def deserialize(cls, data):
return cls(data.get('type'), data.get('body', {}), source=data.get('source'), headers=data.get('headers'))
def serialize(self):
return {
'type': self.evt_type,
'headers': self.headers,
'body': self.body,
'source': self.source,
}
class EventHandler(Component):
def __init__(self, interface, func, event_types, sequential=False, queue_name=None, active=True):
self.func = func
self.event_types = event_types
self.sequential = sequential
self.active = active
self.interface = interface
self._queue_name = queue_name or func.__name__
@property
def queue_name(self):
return '%s-%s' % (self.interface.name, self._queue_name)
@queue_name.setter
def queue_name(self, value):
self._queue_name = value
def on_start(self):
self.interface.container.subscribe(self, consume=self.active)
def __call__(self, event, *args, **kwargs):
trace.set_id(event.headers.get('trace_id'))
logger.debug('<E %s', event)
return self.func(self.interface, event, *args, **kwargs)
class EventDispatcher(object):
wildcards = {
'#': r'[\w.]*(?=\.|$)',
'*': r'\w+',
}
def __init__(self, patterns=()):
self.patterns = []
self.update(patterns)
def compile(self, key):
words = (self.wildcards.get(word, re.escape(word)) for word in key.split('.'))
return re.compile('^%s$' % r'\.'.join(words))
def register(self, pattern, handler):
self.patterns.append((
self.compile(pattern),
pattern,
handler,
))
def __iter__(self):
for regex, pattern, handler in self.patterns:
yield pattern, handler
def update(self, other):
for pattern, handler in other:
self.register(pattern, handler)
def dispatch(self, evt_type):
for regex, pattern, handler in self.patterns:
if regex.match(evt_type):
yield pattern, handler
def __call__(self, event):
handlers = set()
for pattern, handler in self.dispatch(event.evt_type):
if handler not in handlers:
handlers.add(handler)
handler(event)
return bool(handlers)
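#
# minimal dispatch sketch (the handler names are placeholders): '*' matches
# exactly one dot-separated word, '#' matches any dotted tail.
#
# dispatcher = EventDispatcher()
# dispatcher.register('user.*', on_any_user_event)
# dispatcher.register('user.#', on_user_subtree_event)
# dispatcher(Event('user.created', {'id': 42})) # fires both handlers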
| apache-2.0 | 1,420,763,787,660,582,100 | 26.918919 | 114 | 0.571475 | false |
mesocentrefc/Janua-SMS | janua/actions/sms_usage.py | 1 | 2426 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Copyright (c) 2016 Cédric Clerget - HPC Center of Franche-Comté University
#
# This file is part of Janua-SMS
#
# http://github.com/mesocentrefc/Janua-SMS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from janua import jdb
from janua.actions.action import Action
from janua.utils.utilities import get_role
from janua.ws.services import urlconfig, jsonify
class SmsUsage(Action):
"""
Get SMS usage based on administrator quota
* Sample request with administrator level:
.. code-block:: javascript
GET /sms-usage HTTP/1.1
Host: janua.mydomain.com
Content-Type: application/json
JanuaAuthToken: abcdef123456789
Sample response:
.. code-block:: javascript
HTTP/1.1 200
{
"smsusage": {
"global": 18,
"quota": "100 M",
"sent": 18
}
}
* Sample request with supervisor level:
.. code-block:: javascript
GET /sms-usage HTTP/1.1
Host: janua.mydomain.com
Content-Type: application/json
Sample response:
.. code-block:: javascript
HTTP/1.1 200
{
"smsusage": {
"quota": "200 D",
"sent": 4
}
}
"""
category = '__INTERNAL__'
@urlconfig('/sms-usage')
def web(self):
admin = jdb.admin.get_by_phone(self.phone_number)
data = {
'success': True,
'params': [],
'num_params': 0
}
reached, numsms = jdb.sms.is_admin_quota_reached(admin)
quota = admin.sms_quota
data = {'sent': int(numsms), 'quota': quota}
if get_role(admin) == 'admin':
data.update({'global': int(jdb.sms.month_usage())})
return jsonify(smsusage=data)
| gpl-2.0 | 3,349,284,974,751,728,000 | 25.347826 | 76 | 0.60066 | false |
Dziolas/invenio-oaiserver | tests/test_invenio_oaiserver.py | 1 | 1878 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Module tests."""
from __future__ import absolute_import, print_function
from flask import Flask
from flask_babelex import Babel
from invenio_oaiserver import InvenioOAIServer
def test_version():
"""Test version import."""
from invenio_oaiserver import __version__
assert __version__
def test_init():
"""Test extension initialization."""
app = Flask('testapp')
ext = InvenioOAIServer(app)
assert 'invenio-oaiserver' in app.extensions
app = Flask('testapp')
ext = InvenioOAIServer()
assert 'invenio-oaiserver' not in app.extensions
ext.init_app(app)
assert 'invenio-oaiserver' in app.extensions
def test_view(app):
"""Test view."""
Babel(app)
InvenioOAIServer(app)
with app.test_client() as client:
res = client.get("/")
assert res.status_code == 200
assert 'Welcome to Invenio-OAIServer' in str(res.data)
| gpl-2.0 | -5,224,454,211,882,588,000 | 29.290323 | 76 | 0.714058 | false |
nathanbjenx/cairis | cairis/controllers/TemplateGoalController.py | 1 | 3319 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
if (sys.version_info > (3,)):
import http.client
from http.client import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
else:
import httplib
from httplib import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
from flask import session, request, make_response
from flask_restful import Resource
from cairis.data.TemplateGoalDAO import TemplateGoalDAO
from cairis.tools.JsonConverter import json_serialize
from cairis.tools.MessageDefinitions import TemplateGoalMessage
from cairis.tools.ModelDefinitions import TemplateGoalModel
from cairis.tools.SessionValidator import get_session_id
__author__ = 'Shamal Faily'
class TemplateGoalsAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
constraint_id = request.args.get('constraint_id', -1)
dao = TemplateGoalDAO(session_id)
tgs = dao.get_template_goals(constraint_id=constraint_id)
dao.close()
resp = make_response(json_serialize(tgs, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
def post(self):
session_id = get_session_id(session, request)
dao = TemplateGoalDAO(session_id)
new_tg = dao.from_json(request)
dao.add_template_goal(new_tg)
dao.close()
resp_dict = {'message': 'Template Goal successfully added'}
resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
resp.contenttype = 'application/json'
return resp
class TemplateGoalByNameAPI(Resource):
def get(self, name):
session_id = get_session_id(session, request)
dao = TemplateGoalDAO(session_id)
found_tg = dao.get_template_goal(name)
dao.close()
resp = make_response(json_serialize(found_tg, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
def put(self, name):
session_id = get_session_id(session, request)
dao = TemplateGoalDAO(session_id)
upd_tg = dao.from_json(request)
dao.update_template_goal(upd_tg, name)
dao.close()
resp_dict = {'message': 'Template Goal successfully updated'}
resp = make_response(json_serialize(resp_dict), OK)
resp.contenttype = 'application/json'
return resp
def delete(self, name):
session_id = get_session_id(session, request)
dao = TemplateGoalDAO(session_id)
dao.delete_template_goal(name)
dao.close()
resp_dict = {'message': 'Template Goal successfully deleted'}
resp = make_response(json_serialize(resp_dict), OK)
resp.contenttype = 'application/json'
return resp
| apache-2.0 | 2,627,098,913,465,853,000 | 32.525253 | 78 | 0.726424 | false |
andyr0id/PyGFNN | examples/gfnn/example1F.py | 1 | 1657 | #!/usr/bin/env python
__author__ = 'Andrew J. Lambert, [email protected]'
"""
example1F
A one layer network with fixed internal connections
"""
from pygfnn.tools.plotting.gfnn import *
import pygfnn.tools.shortcuts as gfnn
import numpy as np
import timeit
import matplotlib.pyplot as plt
import scipy.io as sio
if __name__ == '__main__':
# Network parameters
oscParams = { 'a': 1, 'b1': -1, 'b2': -1000, 'd1': 0, 'd2': 0, 'e': 1 } # Limit cycle
learnParams = gfnn.NOLEARN_ALLFREQ
freqDist = { 'fspac': 'log', 'min': 0.5, 'max': 8 }
# Make network
n = gfnn.buildGFNN(196, oscParams = oscParams, freqDist = freqDist,
learnParams = learnParams)
n.recurrentConns[0].c0[:] = gfnn.getInitC(n, n, [(1,1), (1,2), (1,3), (1,4), (1,6), (1,8), (2,3), (3,4), (3,8)], thresh=0.01)
n.reset()
# First plots, showing initial connection state
ampFig1, phaseFig1 = plotConns(n.recurrentConns[0].c, freqDist['min'], freqDist['max'])
# Stimulus - 50 seconds of 1Hz sin
t = np.arange(0, 50, n['h'].dt)
x = np.sin(2 * np.pi * 1 * t) * 0.1
# Run the network
timer = timeit.default_timer
start = timer()
for i in range(len(t)):
out = n.activate(x[i])
end = timer()
print('Elapsed time is %f seconds' % (end - start))
if learnParams is not None:
# Second plots, showing final connection state
ampFig2, phaseFig2 = plotConns(n.recurrentConns[0].c, freqDist['min'], freqDist['max'])
Z = n['h'].outputbuffer[:n.offset]
fig1 = ampx(Z, n.dt, freqDist['min'], freqDist['max'])
fig2 = phasex(Z, n.dt, freqDist['min'], freqDist['max'])
plt.show()
| gpl-2.0 | 3,656,434,342,488,919,600 | 29.685185 | 129 | 0.608328 | false |
liberiun/cynin-intranet | src/ubify.viewlets/ubify/viewlets/browser/typetitle.py | 1 | 3657 | ###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at [email protected] with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# [email protected]
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
from zope.component import getMultiAdapter
from Products.CMFCore.utils import getToolByName
from ubify.viewlets.config import plone_site_type_title
from ubify.policy import CyninMessageFactory as _
class TypetitleViewlet(ViewletBase):
render = ViewPageTemplateFile('typetitle.pt')
def update(self):
portal_state = getMultiAdapter((self.context, self.request),name=u'plone_portal_state')
context_state = getMultiAdapter((self.context, self.request),name=u'plone_context_state')
tools = getMultiAdapter((self.context, self.request), name=u'plone_tools')
typetool= getToolByName(self.context, 'portal_types')
portal_title = portal_state.portal_title()
object_title = context_state.object_title()
self.object_icon = self.context.icon
object_typename = self.context.portal_type
object_typeobj = typetool[object_typename]
self.typeiconname = object_typeobj.icon_expr
if object_typeobj.title == '' and self.context.portal_type.lower() == 'plone site':
self.typetitle = plone_site_type_title
else:
self.typetitle = _(object_typeobj.title,object_typeobj.title)
self.app_name = object_title
if self.context.portal_type.lower() == 'plone site':
self.tdescription = 'cyn.in site|A cyn.in site allows instant collaboration among peers and provides a central social computer and network.'
else:
self.tdescription = self.typetitle + '|' + object_typeobj.description
self.isaddscreen = False
if hasattr(context_state.parent(),'portal_type') and context_state.parent().portal_type == 'TempFolder':
self.isaddscreen = True
| gpl-3.0 | 1,373,551,312,006,338,000 | 50.507042 | 152 | 0.710965 | false |
code-for-india/sahana_shelter_worldbank | controllers/vol.py | 1 | 40041 | # -*- coding: utf-8 -*-
"""
Volunteer Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
s3db.hrm_vars()
# =============================================================================
def index():
""" Dashboard """
mode = session.s3.hrm.mode
if mode is not None:
# Go to Personal Profile
redirect(URL(f="person"))
else:
# Bypass home page & go direct to Volunteers Summary
redirect(URL(f="volunteer", args=["summary"]))
# =============================================================================
# People
# =============================================================================
def human_resource():
"""
HR Controller
- combined
Used for Summary view, Imports, S3AddPersonWidget2 and the service record
"""
# Custom method for Service Record
s3db.set_method("hrm", "human_resource",
method = "form",
action = s3db.vol_service_record)
return s3db.hrm_human_resource_controller()
# -----------------------------------------------------------------------------
def volunteer():
""" Volunteers Controller """
# Volunteers only
s3.filter = s3base.S3FieldSelector("type") == 2
vol_experience = settings.get_hrm_vol_experience()
def prep(r):
resource = r.resource
get_config = resource.get_config
# CRUD String
s3.crud_strings[resource.tablename] = s3.crud_strings["hrm_volunteer"]
# Default to volunteers
table = r.table
table.type.default = 2
# Volunteers use home address
location_id = table.location_id
location_id.label = T("Home Address")
# Configure list_fields
if r.representation == "xls":
# Split person_id into first/middle/last to
# make it match Import sheets
list_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
]
else:
list_fields = ["person_id",
]
list_fields.extend(("job_title_id",
"organisation_id",
(settings.get_ui_label_mobile_phone(), "phone.value"),
(T("Email"), "email.value"),
"location_id",
))
if settings.get_hrm_use_trainings():
list_fields.append("person_id$training.course_id")
if settings.get_hrm_use_certificates():
list_fields.append("person_id$certification.certificate_id")
# Volunteer Programme and Active-status
report_options = get_config("report_options")
if vol_experience in ("programme", "both"):
# Don't use status field
table.status.readable = table.status.writable = False
# Use active field?
vol_active = settings.get_hrm_vol_active()
if vol_active:
list_fields.insert(3, (T("Active?"), "details.active"))
# Add Programme to List Fields
list_fields.insert(6, "person_id$hours.programme_id")
# Add active and programme to Report Options
report_fields = report_options.rows
report_fields.append("person_id$hours.programme_id")
if vol_active:
report_fields.append((T("Active?"), "details.active"))
report_options.rows = report_fields
report_options.cols = report_fields
report_options.fact = report_fields
else:
# Use status field
list_fields.append("status")
# Update filter widgets
filter_widgets = s3db.hrm_human_resource_filters(
resource_type="volunteer",
hrm_type_opts=s3db.hrm_type_opts)
# Reconfigure
resource.configure(list_fields = list_fields,
filter_widgets = filter_widgets,
report_options = report_options,
)
if r.interactive:
if r.id:
if r.method not in ("profile", "delete"):
# Redirect to person controller
vars = {
"human_resource.id": r.id,
"group": "volunteer"
}
redirect(URL(f="person", vars=vars))
else:
if r.method == "import":
# Redirect to person controller
redirect(URL(f="person",
args="import",
vars={"group": "volunteer"}))
elif not r.component and r.method != "delete":
# Configure AddPersonWidget
table.person_id.widget = S3AddPersonWidget2(controller="vol")
# Show location ID
location_id.writable = location_id.readable = True
# Hide unwanted fields
for fn in ("site_id",
"code",
"department_id",
"essential",
"site_contact",
"status",
):
table[fn].writable = table[fn].readable = False
# Organisation Dependent Fields
set_org_dependent_field = settings.set_org_dependent_field
set_org_dependent_field("pr_person_details", "father_name")
set_org_dependent_field("pr_person_details", "mother_name")
set_org_dependent_field("pr_person_details", "affiliations")
set_org_dependent_field("pr_person_details", "company")
set_org_dependent_field("vol_details", "availability")
set_org_dependent_field("vol_volunteer_cluster", "vol_cluster_type_id")
set_org_dependent_field("vol_volunteer_cluster", "vol_cluster_id")
set_org_dependent_field("vol_volunteer_cluster", "vol_cluster_position_id")
# Label for "occupation"
s3db.pr_person_details.occupation.label = T("Normal Job")
# Assume volunteers only between 12-81
s3db.pr_person.date_of_birth.widget = S3DateWidget(past=972, future=-144)
return True
s3.prep = prep
def postp(r, output):
if r.interactive and not r.component:
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
# Configure action buttons
s3_action_buttons(r, deletable=settings.get_hrm_deletable())
if "msg" in settings.modules:
# @ToDo: Remove this now that we have it in Events?
s3.actions.append({
"url": URL(f="compose",
vars = {"human_resource.id": "[id]"}),
"_class": "action-btn send",
"label": str(T("Send Message"))
})
# Insert field to set the Programme
if vol_experience in ("programme", "both") and \
r.method not in ("search", "report", "import") and \
"form" in output:
# @ToDo: Re-implement using
# http://eden.sahanafoundation.org/wiki/S3SQLForm
# NB This means adjusting IFRC/config.py too
sep = ": "
table = s3db.hrm_programme_hours
field = table.programme_id
default = field.default
widget = field.widget or SQLFORM.widgets.options.widget(field, default)
field_id = "%s_%s" % (table._tablename, field.name)
label = field.label
row_id = field_id + SQLFORM.ID_ROW_SUFFIX
if s3_formstyle == "bootstrap":
label = LABEL(label, label and sep, _class="control-label", _for=field_id)
_controls = DIV(widget, _class="controls")
row = DIV(label, _controls,
_class="control-group",
_id=row_id,
)
output["form"][0].insert(4, row)
elif callable(s3_formstyle):
label = LABEL(label, label and sep, _for=field_id,
_id=field_id + SQLFORM.ID_LABEL_SUFFIX)
programme = s3_formstyle(row_id, label, widget,
field.comment)
if isinstance(programme, DIV) and \
"form-row" in programme["_class"]:
# Foundation formstyle
output["form"][0].insert(4, programme)
else:
try:
output["form"][0].insert(4, programme[1])
except:
# A non-standard formstyle with just a single row
pass
try:
output["form"][0].insert(4, programme[0])
except:
pass
else:
# Unsupported
raise
elif r.representation == "plain":
# Map Popups
output = s3db.hrm_map_popup(r)
return output
s3.postp = postp
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person():
"""
Person Controller
- used to see PR component tabs, for Personal Profile & Imports
- includes components relevant to HRM
"""
configure = s3db.configure
set_method = s3db.set_method
# Custom Method for Contacts
set_method("pr", resourcename,
method = "contacts",
action = s3db.pr_contacts)
# Custom Method for CV
set_method("pr", resourcename,
method = "cv",
action = s3db.hrm_cv)
# Custom Method for HR Record
set_method("pr", resourcename,
method = "record",
action = s3db.hrm_record)
# Plug-in role matrix for Admins/OrgAdmins
realms = auth.user is not None and auth.user.realms or []
if ADMIN in realms or ORG_ADMIN in realms:
set_method("pr", resourcename,
method = "roles",
action = s3base.S3PersonRoleManager())
if settings.has_module("asset"):
# Assets as component of people
s3db.add_components("pr_person", asset_asset="assigned_to_id")
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
insertable = False,
editable = False,
deletable = False)
get_vars = request.get_vars
group = get_vars.get("group", "volunteer")
hr_id = get_vars.get("human_resource.id", None)
if not str(hr_id).isdigit():
hr_id = None
# Configure human resource table
table = s3db.hrm_human_resource
table.type.default = 2
get_vars["xsltmode"] = "volunteer"
if hr_id:
hr = db(table.id == hr_id).select(table.type,
limitby=(0, 1)).first()
if hr:
group = hr.type == 2 and "volunteer" or "staff"
# Also inform the back-end of this finding
get_vars["group"] = group
# Configure person table
tablename = "pr_person"
table = s3db[tablename]
configure(tablename,
deletable = False)
mode = session.s3.hrm.mode
if mode is not None:
# Configure for personal mode
s3db.hrm_human_resource.organisation_id.readable = True
s3.crud_strings[tablename].update(
title_display = T("Personal Profile"),
title_update = T("Personal Profile"))
# People can view their own HR data, but not edit it
configure("hrm_human_resource",
insertable = False,
editable = False,
deletable = False)
configure("hrm_certification",
insertable = True,
editable = True,
deletable = True)
configure("hrm_credential",
insertable = False,
editable = False,
deletable = False)
configure("hrm_competency",
insertable = True, # Can add unconfirmed
editable = False,
deletable = False)
configure("hrm_training", # Can add but not provide grade
insertable = True,
editable = False,
deletable = False)
configure("hrm_experience",
insertable = False,
editable = False,
deletable = False)
configure("pr_group_membership",
insertable = False,
editable = False,
deletable = False)
else:
# Configure for HR manager mode
s3.crud_strings[tablename].update(
title_display = T("Volunteer Details"),
title_update = T("Volunteer Details"),
title_upload = T("Import Volunteers"),
)
# Upload for configuration (add replace option)
s3.importerPrep = lambda: dict(ReplaceOption=T("Remove existing data before import"))
# Import pre-process
def import_prep(data, group=group):
"""
Deletes all HR records (of the given group) of the organisation
before processing a new data import, used for the import_prep
hook in response.s3
"""
resource, tree = data
xml = current.xml
tag = xml.TAG
att = xml.ATTRIBUTE
if s3.import_replace:
if tree is not None:
if group == "staff":
group = 1
elif group == "volunteer":
group = 2
else:
return # don't delete if no group specified
root = tree.getroot()
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
orgs = root.xpath(expr)
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(xml.xml_decode(org_name))
except:
pass
if org_name:
htable = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (otable.name == org_name) & \
(htable.organisation_id == otable.id) & \
(htable.type == group)
resource = s3db.resource("hrm_human_resource", filter=query)
resource.delete(format="xml", cascade=True)
s3.import_prep = import_prep
# CRUD pre-process
def prep(r):
if r.representation == "s3json":
current.xml.show_ids = True
elif r.interactive and r.method != "import":
if not r.component:
table = r.table
# Assume volunteers only between 12-81
table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
table.pe_label.readable = table.pe_label.writable = False
table.missing.readable = table.missing.writable = False
table.age_group.readable = table.age_group.writable = False
s3db.pr_person_details.occupation.label = T("Normal Job")
# Organisation Dependent Fields
set_org_dependent_field = settings.set_org_dependent_field
set_org_dependent_field("pr_person", "middle_name")
set_org_dependent_field("pr_person_details", "father_name")
set_org_dependent_field("pr_person_details", "mother_name")
set_org_dependent_field("pr_person_details", "affiliations")
set_org_dependent_field("pr_person_details", "company")
else:
if r.component_name == "hours":
# Exclude records which are just to link to Programme
component_table = r.component.table
filter = (r.component.table.hours != None)
r.resource.add_component_filter("hours", filter)
component_table.training.readable = False
component_table.training_id.readable = False
elif r.component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = table.ethnicity.readable = True
table.blood_type.writable = table.blood_type.readable = True
table.medical_conditions.writable = table.medical_conditions.readable = True
table.other_details.writable = table.other_details.readable = True
elif r.component_name == "asset":
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
insertable = False,
editable = False,
deletable = False)
elif r.component_name == "group_membership":
s3db.hrm_configure_pr_group_membership()
if r.method == "record" or r.component_name == "human_resource":
table = s3db.hrm_human_resource
table.code.writable = table.code.readable = False
table.department_id.writable = table.department_id.readable = False
table.essential.writable = table.essential.readable = False
#table.location_id.readable = table.location_id.writable = True
table.person_id.writable = table.person_id.readable = False
table.site_id.writable = table.site_id.readable = False
table.site_contact.writable = table.site_contact.readable = False
org = session.s3.hrm.org
field = table.organisation_id
if org is None:
field.widget = None
else:
field.default = org
field.readable = field.writable = False
# Organisation Dependent Fields
set_org_dependent_field = settings.set_org_dependent_field
set_org_dependent_field("vol_details", "availability")
set_org_dependent_field("vol_volunteer_cluster", "vol_cluster_type_id")
set_org_dependent_field("vol_volunteer_cluster", "vol_cluster_id")
set_org_dependent_field("vol_volunteer_cluster", "vol_cluster_position_id")
resource = r.resource
if mode is not None:
r.resource.build_query(id=s3_logged_in_person())
elif r.method not in ("deduplicate", "search_ac"):
if not r.id and not hr_id:
# pre-action redirect => must retain prior errors
if response.error:
session.error = response.error
redirect(URL(r=r, f="volunteer"))
if resource.count() == 1:
resource.load()
r.record = resource.records().first()
if r.record:
r.id = r.record.id
if not r.record:
session.error = T("Record not found")
redirect(URL(f="volunteer",
args=["search"]))
if hr_id and r.component_name == "human_resource":
r.component_id = hr_id
configure("hrm_human_resource",
insertable = False)
elif r.component_name == "group_membership" and r.representation == "aadata":
s3db.hrm_configure_pr_group_membership()
return True
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component:
if r.component_name == "human_resource":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both") and \
r.method not in ["search", "report", "import"] and \
"form" in output:
# Insert field to set the Programme
# @ToDo: Re-implement using http://eden.sahanafoundation.org/wiki/S3SQLForm
sep = ": "
table = s3db.hrm_programme_hours
field = table.programme_id
if r.id:
query = (table.person_id == r.id)
default = db(query).select(table.programme_id,
orderby=table.date).last()
if default:
default = default.programme_id
else:
default = field.default
widget = field.widget or SQLFORM.widgets.options.widget(field, default)
field_id = "%s_%s" % (table._tablename, field.name)
label = field.label
label = LABEL(label, label and sep, _for=field_id,
_id=field_id + SQLFORM.ID_LABEL_SUFFIX)
row_id = field_id + SQLFORM.ID_ROW_SUFFIX
programme = s3_formstyle(row_id, label, widget,
field.comment)
try:
output["form"][0].insert(2, programme[1])
except:
# A non-standard formstyle with just a single row
pass
try:
output["form"][0].insert(2, programme[0])
except:
pass
elif r.component_name == "experience":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
elif r.component_name == "asset":
# Provide a link to assign a new Asset
# @ToDo: Proper Widget to do this inline
output["add_btn"] = A(T("Assign Asset"),
_href=URL(c="asset", f="asset"),
_id="add-btn",
_class="action-btn")
return output
s3.postp = postp
# REST Interface
if session.s3.hrm.orgname and mode is None:
orgname = session.s3.hrm.orgname
else:
orgname = None
return s3_rest_controller("pr", resourcename,
csv_template = ("hrm", "volunteer"),
csv_stylesheet = ("hrm", "person.xsl"),
csv_extra_fields = [
dict(label="Type",
field=s3db.hrm_human_resource.type)
],
orgname = orgname,
replace_option = T("Remove existing data before import"),
rheader = s3db.hrm_rheader,
)
# -----------------------------------------------------------------------------
def hr_search():
"""
Human Resource REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter to just Volunteers
s3.filter = s3base.S3FieldSelector("human_resource.type") == 2
# Only allow use in the search_ac method
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person_search():
"""
Person REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter to just Volunteers
s3.filter = s3base.S3FieldSelector("human_resource.type") == 2
# Only allow use in the search_ac method
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("pr", "person")
# =============================================================================
# Teams
# =============================================================================
def group():
"""
Team controller
- uses the group table from PR, but filtered to just 'Relief Teams'
"""
return s3db.hrm_group_controller()
# -----------------------------------------------------------------------------
def group_membership():
"""
Membership controller
- uses the group_membership table from PR
"""
# Change Labels
s3db.hrm_configure_pr_group_membership()
table = db.pr_group_membership
# Amend list_fields
s3db.configure("pr_group_membership",
list_fields=["group_id",
"group_id$description",
"group_head",
"person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
(T("Email"), "person_id$email.value"),
(settings.get_ui_label_mobile_phone(), "person_id$phone.value"),
])
# Only show Relief Teams
# Do not show system groups
# Only show Volunteers
gtable = db.pr_group
htable = s3db.hrm_human_resource
s3.filter = (gtable.system == False) & \
(gtable.group_type == 3) & \
(htable.type == 2) & \
(htable.person_id == table.person_id)
def prep(r):
if r.method in ("create", "create.popup", "update", "update.popup"):
# Coming from Profile page?
person_id = request.get_vars.get("~.person_id", None)
if person_id:
field = table.person_id
field.default = person_id
field.readable = field.writable = False
return True
s3.prep = prep
return s3_rest_controller("pr", "group_membership",
csv_template=("hrm", "group_membership"),
csv_stylesheet=("hrm", "group_membership.xsl"),
)
# =============================================================================
# Jobs
# =============================================================================
def department():
""" Departments Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
s3.prep = prep
if not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_department)
return s3_rest_controller("hrm", resourcename)
# -----------------------------------------------------------------------------
def job_title():
""" Job Titles Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
s3.prep = prep
s3.filter = s3base.S3FieldSelector("human_resource.type").belongs((2, 3))
if not auth.s3_has_role(ADMIN):
s3.filter &= auth.filter_by_root_org(s3db.hrm_job_title)
return s3_rest_controller("hrm", resourcename,
csv_template=("hrm", "job_title"),
csv_stylesheet=("hrm", "job_title.xsl"),
)
# =============================================================================
# Skills
# =============================================================================
def skill():
""" Skills Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
return s3_rest_controller("hrm", resourcename,
csv_template=("hrm", "skill"),
csv_stylesheet=("hrm", "skill.xsl"),
)
# -----------------------------------------------------------------------------
def skill_type():
""" Skill Types Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
return s3_rest_controller("hrm", resourcename)
# -----------------------------------------------------------------------------
def competency_rating():
""" Competency Rating for Skill Types Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
return s3_rest_controller("hrm", resourcename,
csv_template=("hrm", "competency_rating"),
csv_stylesheet=("hrm", "competency_rating.xsl"),
)
# -----------------------------------------------------------------------------
def skill_provision():
""" Skill Provisions Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
return s3_rest_controller("hrm", resourcename)
# -----------------------------------------------------------------------------
def course():
""" Courses Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
if not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_course)
return s3_rest_controller("hrm", resourcename,
rheader=s3db.hrm_rheader,
csv_template=("hrm", "course"),
csv_stylesheet=("hrm", "course.xsl"),
)
# -----------------------------------------------------------------------------
def course_certificate():
""" Courses to Certificates Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
return s3_rest_controller("hrm", resourcename)
# -----------------------------------------------------------------------------
def certificate():
""" Certificates Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
s3.prep = prep
if settings.get_hrm_filter_certificates() and \
not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_certificate)
return s3_rest_controller("hrm", resourcename,
rheader=s3db.hrm_rheader,
csv_template=("hrm", "certificate"),
csv_stylesheet=("hrm", "certificate.xsl"),
)
# -----------------------------------------------------------------------------
def certificate_skill():
""" Certificates to Skills Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
return s3_rest_controller("hrm", resourcename)
# -----------------------------------------------------------------------------
def training():
""" Training Controller - used for Searching for Participants """
# Filter to just Volunteers
s3.filter = s3base.S3FieldSelector("human_resource.type") == 2
return s3db.hrm_training_controller()
# -----------------------------------------------------------------------------
def training_event():
""" Training Events Controller """
table = s3db.hrm_training
table.person_id.widget = S3PersonAutocompleteWidget(controller="vol")
return s3db.hrm_training_event_controller()
# -----------------------------------------------------------------------------
def competency():
""" RESTful CRUD controller used to allow searching for people by Skill"""
# Filter to just Volunteers
s3.filter = s3base.S3FieldSelector("person_id$human_resource.type") == 2
return s3db.hrm_competency_controller()
# -----------------------------------------------------------------------------
def credential():
""" Credentials Controller """
# Filter to just Volunteers
s3.filter = s3base.S3FieldSelector("person_id$human_resource.type") == 2
return s3db.hrm_credential_controller()
# -----------------------------------------------------------------------------
def experience():
""" Experience Controller """
# Filter to just Volunteers
s3.filter = s3base.S3FieldSelector("person_id$human_resource.type") == 2
return s3db.hrm_experience_controller()
# =============================================================================
def skill_competencies():
"""
Called by S3OptionsFilter to provide the competency options for a
particular Skill Type
"""
table = s3db.hrm_skill
ttable = s3db.hrm_skill_type
rtable = s3db.hrm_competency_rating
query = (table.id == request.args[0]) & \
(table.skill_type_id == ttable.id) & \
(rtable.skill_type_id == table.skill_type_id)
records = db(query).select(rtable.id,
rtable.name,
orderby=~rtable.priority)
response.headers["Content-Type"] = "application/json"
return records.json()
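# The JSON returned above is a list of rating records for the requested skill,
# e.g. (hypothetical values): [{"id": 3, "name": "Expert"}, {"id": 2, "name": "Competent"}]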
# =============================================================================
def staff_org_site_json():
"""
Used by the Asset - Assign to Person page
"""
table = s3db.hrm_human_resource
otable = s3db.org_organisation
#db.req_commit.date.represent = lambda dt: dt[:10]
query = (table.person_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(table.site_id,
otable.id,
otable.name)
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
def programme():
""" Volunteer Programmes controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
if not auth.s3_has_role(ADMIN):
s3.filter = auth.filter_by_root_org(s3db.hrm_programme)
def prep(r):
if r.component_name == "person":
s3db.configure("hrm_programme_hours",
list_fields=["person_id",
"training",
"programme_id",
"date",
"hours",
])
return True
s3.prep = prep
return s3_rest_controller("hrm", resourcename,
rheader=s3db.hrm_rheader,
csv_stylesheet = ("hrm", "programme.xsl"),
csv_template = ("hrm", "programme")
)
# -----------------------------------------------------------------------------
def programme_hours():
"""
Volunteer Programme Hours controller
- used for Imports & Reports
"""
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
return s3_rest_controller("hrm", resourcename,
csv_stylesheet=("hrm", "programme_hours.xsl"),
csv_template=("hrm", "programme_hours")
)
# =============================================================================
def award():
""" Volunteer Awards controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def volunteer_award():
"""
Used for returning options to the S3AddResourceLink PopUp
"""
# We use component form instead
#def prep(r):
# if r.method in ("create", "create.popup", "update", "update.popup"):
# # Coming from Profile page?
# person_id = request.get_vars.get("~.person_id", None)
# if person_id:
# field = r.table.person_id
# field.default = person_id
# field.readable = field.writable = False
# return True
#s3.prep = prep
return s3_rest_controller()
# =============================================================================
def cluster_type():
""" Volunteer Cluster Types controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def cluster():
""" Volunteer Clusters controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def cluster_position():
""" Volunteer Group Positions controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def volunteer_cluster():
""" ONLY FOR RETURNING options to the S3AddResourceLink PopUp """
return s3_rest_controller()
# =============================================================================
def task():
""" Tasks controller """
return s3db.project_task_controller()
# =============================================================================
# Messaging
# =============================================================================
def compose():
""" Send message to people/teams """
return s3db.hrm_compose()
# END =========================================================================
| mit | -4,564,152,393,702,601,700 | 38.294406 | 96 | 0.471192 | false |
rosenbrockc/fortpy | fortpy/stats/bp.py | 1 | 5243 | """Methods for testing a code library against Fortran best practices to
help uncover subtle bugs that took a while for us to track down. See
especially http://www.cs.rpi.edu/~szymansk/OOF90/bugs.html"""
def _exec_check_pointers(executable):
"""Checks the specified executable for the pointer condition that not
all members of the derived type have had their values set.
    Returns a tuple: (list of offending parameter names, dict mapping each
    parameter name to the members that are never assigned).
"""
oparams = []
pmembers = {}
xassigns = map(lambda x: x.lower().strip(), executable.external_assignments())
def add_offense(pname, member):
"""Adds the specified member as an offender under the specified parameter."""
if pname not in oparams:
oparams.append(pname)
if pname not in pmembers:
pmembers[pname] = [member]
else:
pmembers[pname].append(member)
def check_buried(executable, pname, member):
"""Checks whether the member has its value changed by one of the dependency
subroutines in the executable.
"""
for d in executable.dependencies:
if pname in d.argnames:
pindex = d.argnames.index(pname)
dtarget = d.target
if dtarget is not None:
mparam = dtarget.ordered_parameters[pindex]
for pname, param in executable.parameters.items():
if param.direction == "(out)" and param.is_custom:
utype = param.customtype
if utype is None:
continue
for mname, member in utype.members.items():
key = "{}%{}".format(pname, mname).lower().strip()
if key not in xassigns:
#We also need to check the dependency calls to other, buried subroutines.
compname = "{}%{}".format(pname, mname).lower()
if executable.changed(compname) is None:
add_offense(pname, member)
return (oparams, pmembers)
def _type_check_pointers(utype):
"""Checks the user-derived type for non-nullified pointer array declarations
in its base definition.
Returns (list of offending members).
"""
result = []
for mname, member in utype.members.items():
if ("pointer" in member.modifiers and member.D > 0 and
(member.default is None or "null" not in member.default)):
result.append(member)
return result
def check_pointers(parser, codedir=None, mfilter=None, recursive=False):
"""Checks the modules in the specified code parser to see if they
have common, but subtle, pointer bugs in:
    1. subroutines with a parameter of intent(out) and user-derived type
       *must* set *all* members of that parameter, or those members will have
       an *undefined* status.
    2. pointer-type arrays that are never nullified can return 'T' when passed
       to `associated`, even before they are set to a valid target. Best
       practice is to nullify pointer arrays in user-derived types as the
       default value on those types.
:arg parser: [fortpy.code.CodeParser] with the modules to search *already loaded*.
:arg codedir: specify the full path to the library whose modules should be searched,
just another way to filter which modules are generating the warnings.
:arg mfilter: filter to apply to module names; can use the wildcard standard
from bash.
"""
from fnmatch import fnmatch
from fortpy.msg import std, set_verbosity, info
set_verbosity(0)
W1 = " {} '{}' does not set the value of members '{}' in parameter '{}'."
W2 = " Type '{}' does not nullify members '{}' on creation."
offenders = {}
for (modname, module) in parser.modules.items():
if not recursive and codedir is not None and not codedir.lower() in module.filepath.lower():
continue
if mfilter is not None and not fnmatch(module.name.lower(), mfilter.lower()):
continue
#Test the first condition above for all subroutines in the module; also handle
#the recursively defined subroutines.
hprinted = False
for xname, xvalue in module.executables.items():
oparams, pmembers = _exec_check_pointers(xvalue)
if len(oparams) > 0:
if not hprinted:
info("Best practice suggestions: {}".format(module.filepath))
hprinted = True
for oparam in oparams:
plist = ', '.join([p.name for p in pmembers[oparam]])
std(W1.format(type(xvalue).__name__, xname, plist, oparam), 0)
offenders[xvalue.full_name] = (oparams, pmembers)
for tname, tvalue in module.types.items():
result = _type_check_pointers(tvalue)
if len(result) > 0:
if not hprinted:
info("Best practice suggestions: {}".format(module.filepath))
hprinted = True
plist = ', '.join([p.name for p in result])
std(W2.format(tname, plist), 0)
offenders[xvalue.full_name] = result
return offenders
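# Example usage (a sketch; assumes `parser` is a fortpy.code.CodeParser whose
# modules have already been loaded, as the docstring above requires):
#   offenders = check_pointers(parser, mfilter="mymodule*")
#   # `offenders` maps each executable or type full name to its offending members.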
| mit | -214,200,888,926,042,880 | 42.330579 | 100 | 0.60576 | false |
mitoNGS/MToolBox | aux/filter_HF.py | 1 | 2956 | #!/usr/bin/env python
import fileinput
import sys, os
def usage():
print '''
This script is compatible with MToolBox versions < 1.2 only
This script filters an MToolBox VCF file based on heteroplasmy fraction (HF) and read depth (DP) thresholds
Usage:
filter_HF.py <sample_name> <vcf_file> <HF_threshold[float]> <DP_threshold[float]> <out_type[vcf|txt]> <outfilename> <convert_to_homoplasmy[Yes|No]> \n<vcf_file> can also be a .gz file\n\n<convert_to_homoplasmy> is boolean and takes Yes or No values and converts HF >= 0.9 to GT=1/1. Useful for haplogroup prediction with other methods (e.g. haplogrep)\n\n'''
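# Example invocation (hypothetical sample and file names):
#   python filter_HF.py SAMPLE1 SAMPLE1.vcf.gz 0.2 10 vcf SAMPLE1_HF0.2.vcf No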
if __name__ == "__main__":
if len(sys.argv[1:]) < 7:
sys.stderr.write('ERROR: argument missing\n')
usage()
sys.exit(1)
samplename,vcf,HFt,DPt,out_type,outfile,homo_convert= sys.argv[1:]
HFt = float(HFt)
DPt = float(DPt)
out = open(outfile,'w')
homo_convert = str(homo_convert)
if homo_convert not in ['Yes','No']:
sys.stderr.write('Values accepted for <convert_to_homoplasmy> are [Yes|No].\nExit!\n')
sys.exit(1)
	if 'gz' in vcf or 'gzip' in vcf or 'bz2' in vcf:
ifile = fileinput.input(vcf,openhook=fileinput.hook_compressed)
else:
ifile = fileinput.input(vcf)
for line in ifile:
if line.startswith('##'):
if out_type == 'vcf':
command_string = "##contig=<ID=chrMT,length=16569>\n##filter_VCF_command=filter_vcf.py {0} {1} {2} {3} {4} {5}\n".format(vcf,HFt,DPt,out_type,outfile,homo_convert)
out.write(line)
else:
pass
else:
if line.startswith('#CHROM') and out_type == 'vcf':
out.write(command_string)
line = line.split('\t')
line[-1] = samplename+'\n'
line = '\t'.join(line)
out.write(line)
elif line.startswith('#CHROM') and out_type == 'txt':
header='CHROM\tPOS\tID\tREF\tALT\tDP\tHF\tCIL\tCIU\t'+samplename
out.write(header+'\n')
else:
line = line.split('\t')
geno,DPv,HFv_l,CIL,CIU = line[-1].split(':')
geno = geno.split('/')
if '0' in geno:
geno.remove('0')
HFv_l = HFv_l.split(',')
CIL = CIL.split(',')
CIU = CIU.split(',')
ALT = line[4].split(',')
c =0
while c < (len(geno)):
HFv = float(HFv_l[c])
CILv = float(CIL[c])
CIUv = float(CIU[c])
DPv = float(DPv)
ALTv = str(ALT[c])
if DPv >= float(DPt) and HFv >= float(HFt):
if out_type == 'txt':
res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,DPv,HFv,CILv,CIUv,samplename]))
out.write(res+'\n')
else:
if HFv == 1:
res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,'.','PASS','AC=2,AN=2','GT','1/1']))
elif HFv >= 0.9 and homo_convert == 'Yes':
res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,'.','PASS','AC=2,AN=2','GT','1/1']))
else:
res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,'.','PASS','AC=1,AN=2','GT','0/1']))
out.write(res+'\n')
else:
pass
c += 1
out.close()
| gpl-3.0 | -3,761,606,398,086,951,400 | 33.776471 | 356 | 0.60115 | false |
fogleman/DCPU-16 | app/assembler.py | 1 | 16148 | import ply.lex as lex
import ply.yacc as yacc
# Constants
SIZE = 0x10000
# Lookups
BASIC_OPCODES = {
'SET': 0x01,
'ADD': 0x02,
'SUB': 0x03,
'MUL': 0x04,
'MLI': 0x05,
'DIV': 0x06,
'DVI': 0x07,
'MOD': 0x08,
'MDI': 0x09,
'AND': 0x0a,
'BOR': 0x0b,
'XOR': 0x0c,
'SHR': 0x0d,
'ASR': 0x0e,
'SHL': 0x0f,
'IFB': 0x10,
'IFC': 0x11,
'IFE': 0x12,
'IFN': 0x13,
'IFG': 0x14,
'IFA': 0x15,
'IFL': 0x16,
'IFU': 0x17,
'ADX': 0x1a,
'SUX': 0x1b,
'STI': 0x1e,
'STD': 0x1f,
}
SPECIAL_OPCODES = {
'JSR': 0x01,
'INT': 0x08,
'IAG': 0x09,
'IAS': 0x0a,
'RFI': 0x0b,
'IAQ': 0x0c,
'HWN': 0x10,
'HWQ': 0x11,
'HWI': 0x12,
}
COMMAND_OPCODES = {
'NOP': 0x0000,
'BRK': 0x0040,
'RFI': 0x0160,
}
REGISTERS = {
'A': 0x0,
'B': 0x1,
'C': 0x2,
'X': 0x3,
'Y': 0x4,
'Z': 0x5,
'I': 0x6,
'J': 0x7,
}
DST_CODES = {
'PUSH': 0x18,
'PEEK': 0x19,
'SP': 0x1b,
'PC': 0x1c,
'EX': 0x1d,
}
SRC_CODES = {
'POP': 0x18,
'PEEK': 0x19,
'SP': 0x1b,
'PC': 0x1c,
'EX': 0x1d,
}
# Reverse Lookups
REV_BASIC_OPCODES = dict((v, k) for k, v in BASIC_OPCODES.items())
REV_SPECIAL_OPCODES = dict((v, k) for k, v in SPECIAL_OPCODES.items())
REV_COMMAND_OPCODES = dict((v, k) for k, v in COMMAND_OPCODES.items())
REV_REGISTERS = dict((v, k) for k, v in REGISTERS.items())
REV_DST_CODES = dict((v, k) for k, v in DST_CODES.items())
REV_SRC_CODES = dict((v, k) for k, v in SRC_CODES.items())
# Helper Functions
def pretty_value(x):
return '%d' % x if x <= 0xff else '0x%04x' % x
def do_lookup(lookup, word):
if isinstance(word, basestring):
try:
word = lookup[word]
except KeyError:
raise Exception('Undefined symbol: "%s"' % word)
return word
# Classes
class Program(object):
def __init__(self, instructions):
self.instructions = instructions
self.text = None
self.lookup = {}
self.size = 0
for instruction in instructions:
if instruction.offset is None:
instruction.offset = self.size
self.size += instruction.size
if isinstance(instruction, Label):
self.lookup[instruction.name] = instruction.offset
def assemble(self):
result = []
for instruction in self.instructions:
result.extend(instruction.assemble(self.lookup))
return result
def pretty(self):
lines = []
skip = False
for instruction in self.instructions:
line = instruction.pretty().strip()
if isinstance(instruction, Label):
pad = 0
else:
pad = 4 if skip else 2
line = '%s%s' % (' ' * pad, line)
data = instruction.assemble(self.lookup)
if data and not isinstance(instruction, Data):
pad = ' ' * (32 - len(line))
data = ' '.join('%04x' % x for x in data)
line = '%s%s; %s' % (line, pad, data)
lines.append(line)
skip = instruction.conditional
return '\n'.join(lines)
class Data(object):
def __init__(self, data):
self.data = data
self.size = len(data)
self.offset = None
self.conditional = False
def assemble(self, lookup):
return [do_lookup(lookup, word) for word in self.data]
def pretty(self):
data = ', '.join('"%s"' % x if isinstance(x, str) else pretty_value(x)
for x in self.data)
return 'DAT %s' % data
class Reserve(object):
def __init__(self, size):
self.size = size
self.offset = None
self.conditional = False
def assemble(self, lookup):
return [0] * self.size
def pretty(self):
return 'RESERVE %s' % pretty_value(self.size)
class Label(object):
def __init__(self, name, offset=None):
self.name = name
self.size = 0
self.offset = offset
self.conditional = False
def assemble(self, lookup):
return []
def pretty(self):
return ':%s' % self.name
class BasicInstruction(object):
def __init__(self, op, dst, src):
self.op = op
self.dst = dst
self.src = src
value = self.op
value |= (self.dst.value & 0x1f) << 5
value |= (self.src.value & 0x3f) << 10
self.value = value
self.size = 1 + dst.size + src.size
self.offset = None
self.conditional = 0x10 <= self.op <= 0x17
def assemble(self, lookup):
result = [self.value]
result.extend(self.src.assemble(lookup))
result.extend(self.dst.assemble(lookup))
return result
def pretty(self):
op = REV_BASIC_OPCODES[self.op]
dst = self.dst.pretty()
src = self.src.pretty()
return '%s %s, %s' % (op, dst, src)
class SpecialInstruction(object):
def __init__(self, op, src):
self.op = op
self.src = src
value = 0
value |= (self.op & 0x1f) << 5
value |= (self.src.value & 0x3f) << 10
self.value = value
self.size = 1 + src.size
self.offset = None
self.conditional = False
def assemble(self, lookup):
result = [self.value]
result.extend(self.src.assemble(lookup))
return result
def pretty(self):
op = REV_SPECIAL_OPCODES[self.op]
src = self.src.pretty()
return '%s %s' % (op, src)
class CommandInstruction(object):
def __init__(self, value):
self.value = value
self.size = 1
self.offset = None
self.conditional = False
def assemble(self, lookup):
result = [self.value]
return result
def pretty(self):
return REV_COMMAND_OPCODES[self.value]
class Operand(object):
def __init__(self, codes, value, word=None):
self.codes = codes
self.value = value
self.word = word
self.size = int(word is not None)
def assemble(self, lookup):
return [] if self.word is None else [do_lookup(lookup, self.word)]
def pretty(self):
x = self.value
word = self.word
if isinstance(word, int):
word = pretty_value(word)
if x in REV_REGISTERS:
return REV_REGISTERS[x]
elif x - 0x08 in REV_REGISTERS:
return '[%s]' % REV_REGISTERS[x - 0x08]
elif x - 0x10 in REV_REGISTERS:
return '[%s + %s]' % (REV_REGISTERS[x - 0x10], word)
elif x in self.codes:
return self.codes[x]
elif x == 0x1a:
return 'PICK %s' % word
elif x == 0x1e:
return '[%s]' % word
elif x == 0x1f:
return '%s' % word
elif x == 0x20:
return pretty_value(0xffff)
elif x >= 0x21:
return pretty_value(x - 0x21)
class DstOperand(Operand):
def __init__(self, *args):
super(DstOperand, self).__init__(REV_DST_CODES, *args)
class SrcOperand(Operand):
def __init__(self, *args):
super(SrcOperand, self).__init__(REV_SRC_CODES, *args)
# Lexer Rules
reserved = set(
BASIC_OPCODES.keys() +
SPECIAL_OPCODES.keys() +
COMMAND_OPCODES.keys() +
REGISTERS.keys() +
DST_CODES.keys() +
SRC_CODES.keys() +
['PICK', 'DAT', 'RESERVE']
)
tokens = [
'LBRACK',
'RBRACK',
'PLUS',
'LABEL',
'ID',
'DECIMAL',
'HEX',
'OCT',
'STRING',
'CHAR',
'INC',
'DEC',
'AT'
] + list(reserved)
t_ignore = ' \t\r,'
t_ignore_COMMENT = r';.*'
t_INC = r'\+\+'
t_DEC = r'\-\-'
t_LBRACK = r'\['
t_RBRACK = r'\]'
t_PLUS = r'\+'
t_AT = r'\@'
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_STRING(t):
r'"[^"]*"'
t.value = tuple(ord(x) for x in t.value[1:-1])
return t
def t_CHAR(t):
r"'[^']'"
t.value = ord(t.value[1])
return t
def t_HEX(t):
r'\-?0x[a-fA-F0-9]+'
t.value = int(t.value, 16) % SIZE
return t
def t_OCT(t):
r'\-?0\d+'
t.value = int(t.value, 8) % SIZE
return t
def t_DECIMAL(t):
r'\-?\d+'
t.value = int(t.value) % SIZE
return t
def t_LABEL(t):
r':\.?[a-zA-Z_][a-zA-Z_0-9]*'
t.value = t.value[1:]
if t.value[0] == '.':
t.value = '%s%s' % (t.lexer.label_prefix, t.value)
else:
t.lexer.label_prefix = t.value
return t
def t_ID(t):
r'\.?[a-zA-Z_][a-zA-Z_0-9]*'
upper = t.value.upper()
if upper in reserved:
t.type = upper
t.value = upper
else:
t.type = 'ID'
if t.value[0] == '.':
t.value = '%s%s' % (t.lexer.label_prefix, t.value)
return t
def t_error(t):
raise Exception('Unrecognized token on line %d: %s' % (t.lineno, t.value))
# Parser Rules
def p_program(t):
'program : instructions'
t[0] = Program(t[1])
def p_instructions1(t):
'instructions : instruction instructions'
t[0] = (t[1],) + t[2]
def p_instructions2(t):
'instructions : instruction'
t[0] = (t[1],)
def p_data1(t):
'data : literal data'
arg = t[1] if isinstance(t[1], tuple) else (t[1],)
t[0] = arg + t[2]
def p_data2(t):
'data : literal'
arg = t[1] if isinstance(t[1], tuple) else (t[1],)
t[0] = arg
def p_instruction_data(t):
'instruction : DAT data'
t[0] = Data(t[2])
def p_instruction_reserve(t):
'instruction : RESERVE literal'
t[0] = Reserve(t[2])
def p_instruction_label1(t):
'instruction : LABEL'
t[0] = Label(t[1])
def p_instruction_label2(t):
'instruction : LABEL AT literal'
t[0] = Label(t[1], t[3])
def p_instruction_basic(t):
'instruction : basic_opcode dst_operand src_operand'
t[0] = BasicInstruction(t[1], t[2], t[3])
def p_instruction_special(t):
'instruction : special_opcode src_operand'
t[0] = SpecialInstruction(t[1], t[2])
def p_instruction_command(t):
'instruction : command_opcode'
t[0] = CommandInstruction(t[1])
def p_dst_operand_register(t):
'dst_operand : register'
t[0] = DstOperand(REGISTERS[t[1]])
def p_dst_operand_register_dereference(t):
'dst_operand : LBRACK register RBRACK'
t[0] = DstOperand(REGISTERS[t[2]] + 0x08)
def p_dst_operand_register_literal_dereference1(t):
'dst_operand : LBRACK register PLUS literal RBRACK'
t[0] = DstOperand(REGISTERS[t[2]] + 0x10, t[4])
def p_dst_operand_register_literal_dereference2(t):
'dst_operand : LBRACK literal PLUS register RBRACK'
t[0] = DstOperand(REGISTERS[t[4]] + 0x10, t[2])
def p_dst_operand_pick1(t):
'dst_operand : LBRACK SP PLUS literal RBRACK'
t[0] = DstOperand(0x1a, t[4])
def p_dst_operand_pick2(t):
'dst_operand : LBRACK literal PLUS SP RBRACK'
t[0] = DstOperand(0x1a, t[2])
def p_dst_operand_pick3(t):
'dst_operand : PICK literal'
t[0] = DstOperand(0x1a, t[2])
def p_dst_operand_code(t):
'dst_operand : dst_code'
t[0] = DstOperand(DST_CODES[t[1]])
def p_dst_operand_push(t):
'dst_operand : LBRACK DEC SP RBRACK'
t[0] = DstOperand(0x18)
def p_dst_operand_peek(t):
'dst_operand : LBRACK SP RBRACK'
t[0] = DstOperand(0x19)
def p_dst_operand_literal_dereference(t):
'dst_operand : LBRACK literal RBRACK'
t[0] = DstOperand(0x1e, t[2])
def p_dst_operand_literal(t):
'dst_operand : literal'
t[0] = DstOperand(0x1f, t[1])
def p_src_operand_register(t):
'src_operand : register'
t[0] = SrcOperand(REGISTERS[t[1]])
def p_src_operand_register_dereference(t):
'src_operand : LBRACK register RBRACK'
t[0] = SrcOperand(REGISTERS[t[2]] + 0x08)
def p_src_operand_register_literal_dereference1(t):
'src_operand : LBRACK register PLUS literal RBRACK'
t[0] = SrcOperand(REGISTERS[t[2]] + 0x10, t[4])
def p_src_operand_register_literal_dereference2(t):
'src_operand : LBRACK literal PLUS register RBRACK'
t[0] = SrcOperand(REGISTERS[t[4]] + 0x10, t[2])
def p_src_operand_pick1(t):
'src_operand : LBRACK SP PLUS literal RBRACK'
t[0] = SrcOperand(0x1a, t[4])
def p_src_operand_pick2(t):
'src_operand : LBRACK literal PLUS SP RBRACK'
t[0] = SrcOperand(0x1a, t[2])
def p_src_operand_pick3(t):
'src_operand : PICK literal'
t[0] = SrcOperand(0x1a, t[2])
def p_src_operand_code(t):
'src_operand : src_code'
t[0] = SrcOperand(SRC_CODES[t[1]])
def p_src_operand_pop(t):
'src_operand : LBRACK SP INC RBRACK'
t[0] = SrcOperand(0x18)
def p_src_operand_peek(t):
'src_operand : LBRACK SP RBRACK'
t[0] = SrcOperand(0x19)
def p_src_operand_literal_dereference(t):
'src_operand : LBRACK literal RBRACK'
t[0] = SrcOperand(0x1e, t[2])
def p_src_operand_literal(t):
'src_operand : literal'
if t[1] == 0xffff:
t[0] = SrcOperand(0x20)
elif t[1] <= 0x1e:
t[0] = SrcOperand(0x21 + t[1])
else:
t[0] = SrcOperand(0x1f, t[1])
def p_literal(t):
'''literal : DECIMAL
| HEX
| OCT
| ID
| STRING
| CHAR'''
t[0] = t[1]
def p_basic_opcode(t):
t[0] = BASIC_OPCODES[t[1]]
p_basic_opcode.__doc__ = ('basic_opcode : %s' %
'\n | '.join(sorted(BASIC_OPCODES)))
def p_special_opcode(t):
t[0] = SPECIAL_OPCODES[t[1]]
p_special_opcode.__doc__ = ('special_opcode : %s' %
'\n | '.join(sorted(SPECIAL_OPCODES)))
def p_command_opcode(t):
t[0] = COMMAND_OPCODES[t[1]]
p_command_opcode.__doc__ = ('command_opcode : %s' %
'\n | '.join(sorted(COMMAND_OPCODES)))
def p_register(t):
t[0] = t[1]
p_register.__doc__ = ('register : %s' %
'\n | '.join(sorted(REGISTERS)))
def p_dst_code(t):
t[0] = t[1]
p_dst_code.__doc__ = ('dst_code : %s' %
'\n | '.join(sorted(DST_CODES)))
def p_src_code(t):
t[0] = t[1]
p_src_code.__doc__ = ('src_code : %s' %
'\n | '.join(sorted(SRC_CODES)))
def p_error(t):
raise Exception('Invalid token on line %d: %s' % (t.lineno, t.value))
# Assembler Functions
def create_lexer():
lexer = lex.lex()
lexer.label_prefix = None
return lexer
def create_parser():
parser = yacc.yacc(debug=False, write_tables=False)
return parser
LEXER = create_lexer()
PARSER = create_parser()
def parse(text):
LEXER.lineno = 1
program = PARSER.parse(text, lexer=LEXER)
program.text = text
return program
def parse_file(path):
with open(path) as fp:
text = fp.read()
return parse(text)
def assemble(text):
program = parse(text)
return program.assemble()
def assemble_file(path):
with open(path) as fp:
text = fp.read()
return assemble(text)
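# Example (a sketch with a hypothetical two-line program): assemble source text
# into a list of 16-bit words ready to load into an emulator.
#   words = assemble('SET A, 0x30\nSET [0x1000], A')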
def pretty(text):
program = parse(text)
return program.pretty()
def pretty_file(path):
with open(path) as fp:
text = fp.read()
return pretty(text)
# Disassembler Functions
def disassemble(words):
def next_word():
return words.pop() if words else 0
instructions = []
use_next_word = set(range(0x10, 0x18) + [0x1a, 0x1e, 0x1f])
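    # Operand codes 0x10-0x17 ([register + next word]), 0x1a (PICK n),
    # 0x1e ([next word]) and 0x1f (next word literal) all consume an extra word.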
words = list(reversed(words))
while words:
word = next_word()
op = word & 0x1f
dst = (word >> 5) & 0x1f
src = (word >> 10) & 0x3f
if op != 0 and op in REV_BASIC_OPCODES:
dst = DstOperand(dst, next_word()
if dst in use_next_word else None)
src = SrcOperand(src, next_word()
if src in use_next_word else None)
instruction = BasicInstruction(op, dst, src)
instructions.append(instruction)
elif op == 0 and dst in REV_SPECIAL_OPCODES:
src = SrcOperand(src, next_word()
if src in use_next_word else None)
instruction = SpecialInstruction(dst, src)
instructions.append(instruction)
else:
instruction = Data([word])
instructions.append(instruction)
program = Program(instructions)
program.text = program.pretty()
return program
def disassemble_file(path):
with open(path, 'rb') as fp:
data = fp.read()
words = [(ord(a) << 8) | ord(b) for a, b in zip(data[::2], data[1::2])]
return disassemble(words)
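# Example (hypothetical binary file): recover readable source from machine words.
#   program = disassemble_file('program.bin')
#   print program.pretty()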
| mit | 4,979,945,226,758,335,000 | 24.631746 | 78 | 0.560503 | false |
bnkr/selenit | selenibench/scripts.py | 1 | 3871 | from __future__ import print_function
import sys, argparse, selenium, contextlib, os, json, traceback
from datetime import datetime as DateTime
from datetime import timedelta as TimeDelta
from selenium.webdriver import Remote as WebDriverRemote
from selenium.webdriver.support.ui import WebDriverWait
class SelenibenchCli(object):
"""Downloads timings from the web performance api."""
def __init__(self, argv):
self.argv = argv
def run(self):
parser = self.get_parser()
settings = self.get_settings(parser)
if settings.log_json:
io = open(settings.log_json, 'w')
else:
io = None
runs = 0
contiguous_failures = 0
while runs < settings.number:
runs += 1
remote = WebDriverRemote(command_executor=settings.webdriver,
desired_capabilities=settings.capabilities)
with contextlib.closing(remote) as driver:
try:
driver.get(settings.url[0])
self.find_load_times(driver, io)
contiguous_failures = 0
except:
if contiguous_failures > 3:
print("Failure getting load times. Giving up.")
raise
contiguous_failures += 1
runs -= 1
print("Failure getting load times. Will try again.")
                    traceback.print_exc()
return 0
def find_load_times(self, driver, log):
def is_loaded(driver):
return driver.execute_script("return (document.readyState == 'complete')")
WebDriverWait(driver, 15).until(is_loaded)
timings = driver.execute_script("return window.performance.timing")
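        # window.performance.timing reports each navigation milestone as an
        # epoch timestamp in milliseconds; the first ten digits are the seconds
        # part and any remaining digits are the milliseconds part.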
times = {}
for key, value in timings.iteritems():
if not isinstance(value, int):
continue
if value in (True, False):
continue
value = str(value)
unixey = int(value[0:10])
if value[10:]:
ms = int(value[10:])
else:
ms = 0
converted = DateTime.fromtimestamp(unixey)
converted += TimeDelta(milliseconds=ms)
times[key] = converted
# This kind of thing really needs unit tests. The thing takes so long
# to run it's just going to break horribly.
if log:
serialisable = dict(
(key, value.isoformat())
for key, value in times.iteritems())
log.write(json.dumps(serialisable))
log.write("\n")
print(times)
def get_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument("url", nargs="+")
parser.add_argument("-w", "--webdriver", required=True,
help="Location to hub or webdriver.")
parser.add_argument("-c", "--capabilities", action="append", default=[],
help="Add a capability.")
parser.add_argument("-n", "--number", type=int, default=1,
help="How many requests to run.")
parser.add_argument("-j", "--log-json", default=None,
help="Log json per-line for each hit.")
return parser
def get_settings(self, parser):
settings = parser.parse_args(self.argv[1:])
capabilities = {'browserName': "firefox"}
for capability in settings.capabilities:
name, value = capability.split("=")
capabilities[name.strip()] = value.strip()
settings.capabilities = capabilities
return settings
def selenibench_main():
"""Command-line entry point."""
cli = SelenibenchCli(sys.argv)
sys.exit(cli.run())
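# Example command line (hypothetical script name, hub URL and target page):
#   selenibench -w http://localhost:4444/wd/hub -n 5 -j timings.jsonl http://example.com/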
| mit | -2,705,399,600,824,886,300 | 32.08547 | 86 | 0.547145 | false |
tobykurien/MakerDroid | assetsrc/public.mp3/fabmetheus_utilities/vector3index.py | 1 | 13371 | """
Vector3 is a three dimensional vector class.
Below are examples of Vector3 use.
>>> from vector3 import Vector3
>>> origin = Vector3()
>>> origin
0.0, 0.0, 0.0
>>> pythagoras = Vector3( 3, 4, 0 )
>>> pythagoras
3.0, 4.0, 0.0
>>> pythagoras.magnitude()
5.0
>>> pythagoras.magnitudeSquared()
25
>>> triplePythagoras = pythagoras * 3.0
>>> triplePythagoras
9.0, 12.0, 0.0
>>> plane = pythagoras.dropAxis( 2 )
>>> plane
(3+4j)
"""
from __future__ import absolute_import
try:
import psyco
psyco.full()
except:
pass
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
import math
import operator
__author__ = "Enrique Perez ([email protected])"
__credits__ = 'Nophead <http://forums.reprap.org/profile.php?12,28>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
class Vector3Index:
"A three dimensional vector index class."
__slots__ = [ 'index', 'x', 'y', 'z' ]
def __init__( self, index, x = 0.0, y = 0.0, z = 0.0 ):
self.index = index
self.x = x
self.y = y
self.z = z
def __abs__( self ):
"Get the magnitude of the Vector3."
return math.sqrt( self.x * self.x + self.y * self.y + self.z * self.z )
magnitude = __abs__
def __add__( self, other ):
"Get the sum of this Vector3 and other one."
return Vector3( self.x + other.x, self.y + other.y, self.z + other.z )
def __copy__( self ):
"Get the copy of this Vector3."
return Vector3( self.x, self.y, self.z )
__pos__ = __copy__
copy = __copy__
def __div__( self, other ):
"Get a new Vector3 by dividing each component of this one."
return Vector3( self.x / other, self.y / other, self.z / other )
def __eq__( self, other ):
"Determine whether this vector is identical to other one."
if other == None:
return False
return self.x == other.x and self.y == other.y and self.z == other.z
def __floordiv__( self, other ):
"Get a new Vector3 by floor dividing each component of this one."
return Vector3( self.x // other, self.y // other, self.z // other )
def __hash__( self ):
"Determine whether this vector is identical to other one."
return self.__repr__().__hash__()
def __iadd__( self, other ):
"Add other Vector3 to this one."
self.x += other.x
self.y += other.y
self.z += other.z
return self
def __idiv__( self, other ):
"Divide each component of this Vector3."
self.x /= other
self.y /= other
self.z /= other
return self
def __ifloordiv__( self, other ):
"Floor divide each component of this Vector3."
self.x //= other
self.y //= other
self.z //= other
return self
def __imul__( self, other ):
"Multiply each component of this Vector3."
self.x *= other
self.y *= other
self.z *= other
return self
def __isub__( self, other ):
"Subtract other Vector3 from this one."
self.x -= other.x
self.y -= other.y
self.z -= other.z
return self
def __itruediv__( self, other ):
"True divide each component of this Vector3."
self.x = operator.truediv( self.x, other )
self.y = operator.truediv( self.y, other )
self.z = operator.truediv( self.z, other )
return self
def __mul__( self, other ):
"Get a new Vector3 by multiplying each component of this one."
return Vector3( self.x * other, self.y * other, self.z * other )
def __ne__( self, other ):
"Determine whether this vector is not identical to other one."
return not self.__eq__( other )
def __neg__( self ):
return Vector3( - self.x, - self.y, - self.z )
def __nonzero__( self ):
return self.x != 0 or self.y != 0 or self.z != 0
def __repr__( self ):
"Get the string representation of this Vector3."
return '%s, %s, %s, %s' % ( self.index, self.x, self.y, self.z )
def __rdiv__( self, other ):
"Get a new Vector3 by dividing each component of this one."
return Vector3( other / self.x, other / self.y, other / self.z )
def __rfloordiv__( self, other ):
"Get a new Vector3 by floor dividing each component of this one."
return Vector3( other // self.x, other // self.y, other // self.z )
def __rmul__( self, other ):
"Get a new Vector3 by multiplying each component of this one."
return Vector3( self.x * other, self.y * other, self.z * other )
def __rtruediv__( self, other ):
"Get a new Vector3 by true dividing each component of this one."
return Vector3( operator.truediv( other , self.x ), operator.truediv( other, self.y ), operator.truediv( other, self.z ) )
def __sub__( self, other ):
"Get the difference between the Vector3 and other one."
return Vector3( self.x - other.x, self.y - other.y, self.z - other.z )
def __truediv__( self, other ):
"Get a new Vector3 by true dividing each component of this one."
return Vector3( operator.truediv( self.x, other ), operator.truediv( self.y, other ), operator.truediv( self.z, other ) )
def cross( self, other ):
"Calculate the cross product of this vector with other one."
return Vector3( self.y * other.z - self.z * other.y, - self.x * other.z + self.z * other.x, self.x * other.y - self.y * other.x )
def distance( self, other ):
"Get the Euclidean distance between this vector and other one."
return math.sqrt( self.distanceSquared( other ) )
def distanceSquared( self, other ):
"Get the square of the Euclidean distance between this vector and other one."
separationX = self.x - other.x
separationY = self.y - other.y
separationZ = self.z - other.z
return separationX * separationX + separationY * separationY + separationZ * separationZ
def dot( self, other ):
"Calculate the dot product of this vector with other one."
return self.x * other.x + self.y * other.y + self.z * other.z
def dropAxis( self, which ):
"""Get a complex by removing one axis of this one.
Keyword arguments:
which -- the axis to drop (0=X, 1=Y, 2=Z)"""
if which == 0:
return complex( self.y, self.z )
if which == 1:
return complex( self.x, self.z )
if which == 2:
return complex( self.x, self.y )
def getNormalized( self, other ):
"Get the normalized Vector3."
magnitude = abs( self )
if magnitude == 0.0:
return self.copy()
return self / magnitude
def magnitudeSquared( self ):
"Get the square of the magnitude of the Vector3."
return self.x * self.x + self.y * self.y + self.z * self.z
def normalize( self ):
"Scale each component of this Vector3 so that it has a magnitude of 1. If this Vector3 has a magnitude of 0, this method has no effect."
magnitude = abs( self )
if magnitude != 0.0:
self /= magnitude
def reflect( self, normal ):
"Reflect the Vector3 across the normal, which is assumed to be normalized."
distance = 2 * ( self.x * normal.x + self.y * normal.y + self.z * normal.z )
return Vector3( self.x - distance * normal.x, self.y - distance * normal.y, self.z - distance * normal.z )
def setToVector3( self, other ):
"Set this Vector3 to be identical to other one."
self.x = other.x
self.y = other.y
self.z = other.z
def setToXYZ( self, x, y, z ):
"Set the x, y, and z components of this Vector3."
self.x = x
self.y = y
self.z = z
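# Brief sketch of intended use: a Vector3Index behaves like a Vector3 but also
# remembers the index it was constructed with.
#   v = Vector3Index(7, 3.0, 4.0, 0.0)
#   abs(v) # 5.0
#   v.dropAxis(2) # (3+4j)
#   repr(v) # '7, 3.0, 4.0, 0.0'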
"""
class Vector3:
__slots__ = ['x', 'y', 'z']
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __copy__(self):
return self.__class__(self.x, self.y, self.z)
copy = __copy__
def __repr__(self):
return 'Vector3(%.2f, %.2f, %.2f)' % (self.x,
self.y,
self.z)
def __eq__(self, other):
if isinstance(other, Vector3):
return self.x == other.x and \
self.y == other.y and \
self.z == other.z
else:
assert hasattr(other, '__len__') and len(other) == 3
return self.x == other[0] and \
self.y == other[1] and \
self.z == other[2]
def __ne__(self, other):
return not self.__eq__(other)
def __nonzero__(self):
return self.x != 0 or self.y != 0 or self.z != 0
def __len__(self):
return 3
def __getitem__(self, key):
return (self.x, self.y, self.z)[key]
def __setitem__(self, key, value):
l = [self.x, self.y, self.z]
l[key] = value
self.x, self.y, self.z = l
def __iter__(self):
return iter((self.x, self.y, self.z))
def __getattr__(self, name):
try:
return tuple([(self.x, self.y, self.z)['xyz'.index(c)] \
for c in name])
except ValueError:
raise AttributeError, name
if _enable_swizzle_set:
# This has detrimental performance on ordinary setattr as well
# if enabled
def __setattr__(self, name, value):
if len(name) == 1:
object.__setattr__(self, name, value)
else:
try:
l = [self.x, self.y, self.z]
for c, v in map(None, name, value):
l['xyz'.index(c)] = v
self.x, self.y, self.z = l
except ValueError:
raise AttributeError, name
def __add__(self, other):
if isinstance(other, Vector3):
# Vector + Vector -> Vector
# Vector + Point -> Point
# Point + Point -> Vector
if self.__class__ is other.__class__:
_class = Vector3
else:
_class = Point3
return _class(self.x + other.x,
self.y + other.y,
self.z + other.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(self.x + other[0],
self.y + other[1],
self.z + other[2])
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vector3):
self.x += other.x
self.y += other.y
self.z += other.z
else:
self.x += other[0]
self.y += other[1]
self.z += other[2]
return self
def __sub__(self, other):
if isinstance(other, Vector3):
# Vector - Vector -> Vector
# Vector - Point -> Point
# Point - Point -> Vector
if self.__class__ is other.__class__:
_class = Vector3
else:
_class = Point3
return Vector3(self.x - other.x,
self.y - other.y,
self.z - other.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(self.x - other[0],
self.y - other[1],
self.z - other[2])
def __rsub__(self, other):
if isinstance(other, Vector3):
return Vector3(other.x - self.x,
other.y - self.y,
other.z - self.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(other.x - self[0],
other.y - self[1],
other.z - self[2])
def __mul__(self, other):
if isinstance(other, Vector3):
# TODO component-wise mul/div in-place and on Vector2; docs.
if self.__class__ is Point3 or other.__class__ is Point3:
_class = Point3
else:
_class = Vector3
return _class(self.x * other.x,
self.y * other.y,
self.z * other.z)
else:
assert type(other) in (int, long, float)
return Vector3(self.x * other,
self.y * other,
self.z * other)
__rmul__ = __mul__
def __imul__(self, other):
assert type(other) in (int, long, float)
self.x *= other
self.y *= other
self.z *= other
return self
def __div__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.div(self.x, other),
operator.div(self.y, other),
operator.div(self.z, other))
def __rdiv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.div(other, self.x),
operator.div(other, self.y),
operator.div(other, self.z))
def __floordiv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.floordiv(self.x, other),
operator.floordiv(self.y, other),
operator.floordiv(self.z, other))
def __rfloordiv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.floordiv(other, self.x),
operator.floordiv(other, self.y),
operator.floordiv(other, self.z))
def __truediv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.truediv(self.x, other),
operator.truediv(self.y, other),
operator.truediv(self.z, other))
def __rtruediv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.truediv(other, self.x),
operator.truediv(other, self.y),
operator.truediv(other, self.z))
def __neg__(self):
return Vector3(-self.x,
-self.y,
-self.z)
__pos__ = __copy__
def __abs__(self):
return math.sqrt(self.x ** 2 + \
self.y ** 2 + \
self.z ** 2)
magnitude = __abs__
def magnitude_squared(self):
return self.x ** 2 + \
self.y ** 2 + \
self.z ** 2
def normalize(self):
d = self.magnitude()
if d:
self.x /= d
self.y /= d
self.z /= d
return self
def normalized(self):
d = self.magnitude()
if d:
return Vector3(self.x / d,
self.y / d,
self.z / d)
return self.copy()
def dot(self, other):
assert isinstance(other, Vector3)
return self.x * other.x + \
self.y * other.y + \
self.z * other.z
def cross(self, other):
assert isinstance(other, Vector3)
return Vector3(self.y * other.z - self.z * other.y,
-self.x * other.z + self.z * other.x,
self.x * other.y - self.y * other.x)
def reflect(self, normal):
# assume normal is normalized
assert isinstance(normal, Vector3)
d = 2 * (self.x * normal.x + self.y * normal.y + self.z * normal.z)
return Vector3(self.x - d * normal.x,
self.y - d * normal.y,
self.z - d * normal.z)
""" | gpl-3.0 | -3,217,481,328,199,761,400 | 26.289796 | 157 | 0.614315 | false |
IQSS/geoconnect | gc_apps/classification/layer_link_helper.py | 1 | 5041 | """
Used for development to create WorldMap-related links from a layer name
"""
from __future__ import print_function
import logging
import re
import requests
from django.conf import settings
LOGGER = logging.getLogger(__name__)
GEONODE_PREFIX = 'geonode:'
class LayerLink(object):
"""Holds name, link, description"""
def __init__(self, name, link, description=None):
self.name = name
self.link = link
self.description = description
def show(self):
"""print info"""
info = ('name: {0}'
'link: {1}'
'description: {2}'\
).format(self.name, self.link, self.description)
print (info)
class LayerLinkHelper(object):
"""
For development/debugging, given a WorldMap layer name, create links
related to various geonode services including:
- Listing geoserver attributes for the layer
- Retrieving the current SLD in XML format
- Showing the classify service url, etc.
"""
def __init__(self, layer_name, server_name='http://localhost:8000'):
assert layer_name is not None, "layer_name cannot be None"
self.layer_name = layer_name # geonode:boston_social_disorder
self.server_name = server_name
if self.server_name.endswith('/'):
self.server_name = self.server_name[:-1]
self.layer_name_no_prefix = None # boston_social_disorder
self.links_dict = {}
self.links_list = []
# Secondary processing involving requests
self.sld_name = None
self.format_layer_name()
self.format_layer_links()
def format_layer_name(self):
"""
Make sure the layer name has the GEONODE_PREFIX
e.g. "geonode:boston_social_disorder"
Set a variable w/o the prefix
e.g. layer_name_no_prefix = "boston_social_disorder"
"""
if not self.layer_name.startswith(GEONODE_PREFIX):
self.layer_name = '%s%s' % (GEONODE_PREFIX, self.layer_name)
self.layer_name_no_prefix = self.layer_name[len(GEONODE_PREFIX):]
def add_link(self, name, link, description=''):
"""
Add a LayerLink object to "links_list"
"""
layer_link_obj = LayerLink(name, link, description)
# add to list
self.links_list.append(layer_link_obj)
# add to dict
self.links_dict[name] = layer_link_obj
LOGGER.debug('links count: %s', len(self.links_list))
def get_geoserver(self):
"""Retrieve the geoserver url"""
return self.server_name.replace(':8000', ':8080')
def format_layer_links(self):
"""Format/Create the layer links"""
# View layer
view_url = '%s/data/%s' % (self.server_name, self.layer_name)
self.add_link('wm_layer', view_url, 'WorldMap layer view')
# Geoserver attributes
attr_url = ('%s/geoserver/rest/sldservice/%s/attributes.xml'\
% (self.get_geoserver(), self.layer_name))
self.add_link('attributes', attr_url, 'Geoserver Attributes')
# SLD Name
layer_url = '%s/geoserver/rest/layers/%s.html' %\
(self.get_geoserver(), self.layer_name_no_prefix)
self.add_link('sld_name', layer_url, 'SLD name')
if not self.get_sld_name():
return
sld_url = '%s/geoserver/rest/styles/%s.sld' % \
(self.get_geoserver(), self.sld_name)
self.add_link('sld_xml', sld_url, 'current SLD XML')
sld_url2 = '%s%s%s%s' % (\
self.get_geoserver(),
'/geoserver/web/?wicket:bookmarkablePage=',
':org.geoserver.wms.web.data.StyleEditPage&name=',
self.sld_name)
self.add_link('sld_xml2', sld_url2, 'Editable/Formatted SLD XML')
def get_sld_name(self):
"""
Retrieve the layer's SLD name from the server
"""
if not 'sld_name' in self.links_dict:
return False
sld_url = self.links_dict['sld_name'].link
#print ('Attempt to retrieve SLD sld_url: %s' % sld_url)
resp = requests.get(sld_url, auth=settings.WORLDMAP_ACCOUNT_AUTH)
if not resp.status_code == 200:
LOGGER.error('Failed to retrieve SLD: %s', sld_url)
return False
# Parse out the SLD Name
sld_search = re.search(r'<li>Default style: StyleInfoImpl\[(.*)\]',\
resp.text, re.IGNORECASE)
if sld_search is None:
LOGGER.error('Failed to retrieve SLD')
return False
sld_name = sld_search.group(1)
self.sld_name = sld_name
return True
"""
if title_search:
title = title_search.group(1)
content = r.text
start_tag =
idx = content.find('<li>Default style: StyleInfoImpl[')
if idx == -1:
print 'Failed to retrieve SLD'
return
end_idx = content.find(']', idx +
print r.text
"""
| apache-2.0 | 4,475,897,529,455,973,400 | 29.551515 | 76 | 0.575878 | false |
MetricsGrimoire/sortinghat | tests/test_cmd_log.py | 1 | 8958 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2017 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <[email protected]>
#
import datetime
import sys
import unittest
if '..' not in sys.path:
sys.path.insert(0, '..')
from sortinghat import api
from sortinghat.command import CMD_SUCCESS
from sortinghat.cmd.log import Log
from sortinghat.exceptions import CODE_INVALID_DATE_ERROR, CODE_VALUE_ERROR, CODE_NOT_FOUND_ERROR
from tests.base import TestCommandCaseBase
LOG_UUID_NOT_FOUND_ERROR = "Error: Jane Roe not found in the registry"
LOG_ORG_NOT_FOUND_ERROR = "Error: LibreSoft not found in the registry"
LOG_INVALID_PERIOD_ERROR = "Error: 'from_date' 2001-01-01 00:00:00 cannot be greater than 1999-01-01 00:00:00"
LOG_INVALID_DATE_ERROR = "Error: 1999-13-01 is not a valid date"
LOG_INVALID_FORMAT_DATE_ERROR = "Error: YYZYY is not a valid date"
LOG_EMPTY_OUTPUT = ""
LOG_OUTPUT = """John Doe\tExample\t1900-01-01 00:00:00\t2100-01-01 00:00:00
John Smith\tBitergia\t1900-01-01 00:00:00\t2100-01-01 00:00:00
John Smith\tBitergia\t1999-01-01 00:00:00\t2000-01-01 00:00:00
John Smith\tBitergia\t2006-01-01 00:00:00\t2008-01-01 00:00:00
John Smith\tExample\t1900-01-01 00:00:00\t2100-01-01 00:00:00"""
LOG_UUID_OUTPUT = """John Doe\tExample\t1900-01-01 00:00:00\t2100-01-01 00:00:00"""
LOG_ORG_OUTPUT = """John Smith\tBitergia\t1900-01-01 00:00:00\t2100-01-01 00:00:00
John Smith\tBitergia\t1999-01-01 00:00:00\t2000-01-01 00:00:00
John Smith\tBitergia\t2006-01-01 00:00:00\t2008-01-01 00:00:00"""
LOG_TIME_PERIOD_OUTPUT = """John Smith\tBitergia\t1999-01-01 00:00:00\t2000-01-01 00:00:00"""
class TestLogCaseBase(TestCommandCaseBase):
"""Defines common setup and teardown methods on log unit tests"""
cmd_klass = Log
def load_test_dataset(self):
self.db.clear()
api.add_unique_identity(self.db, 'John Smith')
api.add_unique_identity(self.db, 'John Doe')
api.add_organization(self.db, 'Example')
api.add_organization(self.db, 'Bitergia')
api.add_enrollment(self.db, 'John Smith', 'Example')
api.add_enrollment(self.db, 'John Doe', 'Example')
api.add_enrollment(self.db, 'John Smith', 'Bitergia')
api.add_enrollment(self.db, 'John Smith', 'Bitergia',
datetime.datetime(1999, 1, 1),
datetime.datetime(2000, 1, 1))
api.add_enrollment(self.db, 'John Smith', 'Bitergia',
datetime.datetime(2006, 1, 1),
datetime.datetime(2008, 1, 1))
class TestLogCommand(TestLogCaseBase):
"""Unit tests for log command"""
def test_log(self):
"""Check log output"""
code = self.cmd.run()
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_OUTPUT)
def test_log_uuid(self):
"""Check log using a uuid"""
code = self.cmd.run('--uuid', 'John Doe')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_UUID_OUTPUT)
def test_log_organization(self):
"""Check log using a organization"""
code = self.cmd.run('--organization', 'Bitergia')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_ORG_OUTPUT)
def test_log_period(self):
"""Check log using a time period"""
code = self.cmd.run('--from', '1990-1-1 08:59:17',
'--to', '2005-1-1')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_TIME_PERIOD_OUTPUT)
def test_log_mix_filter(self):
"""Check log using some filters"""
code = self.cmd.run('--uuid', 'John Doe',
'--organization', 'Example',
'--from', '1990-1-1 08:59:17',
'--to', '2005-1-1')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_EMPTY_OUTPUT)
def test_empty_registry(self):
"""Check output when the registry is empty"""
# Delete the contents of the database
self.db.clear()
code = self.cmd.run()
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_EMPTY_OUTPUT)
def test_invalid_dates(self):
"""Check whether it fails when invalid dates are given"""
code = self.cmd.run('--from', '1999-13-01')
self.assertEqual(code, CODE_INVALID_DATE_ERROR)
output = sys.stderr.getvalue().strip().split('\n')[0]
self.assertEqual(output, LOG_INVALID_DATE_ERROR)
code = self.cmd.run('--from', 'YYZYY')
self.assertEqual(code, CODE_INVALID_DATE_ERROR)
output = sys.stderr.getvalue().strip().split('\n')[-1]
self.assertEqual(output, LOG_INVALID_FORMAT_DATE_ERROR)
code = self.cmd.run('--to', '1999-13-01')
self.assertEqual(code, CODE_INVALID_DATE_ERROR)
output = sys.stderr.getvalue().strip().split('\n')[-1]
self.assertEqual(output, LOG_INVALID_DATE_ERROR)
code = self.cmd.run('--to', 'YYZYY')
self.assertEqual(code, CODE_INVALID_DATE_ERROR)
output = sys.stderr.getvalue().strip().split('\n')[-1]
self.assertEqual(output, LOG_INVALID_FORMAT_DATE_ERROR)
class TestLog(TestLogCaseBase):
"""Unit tests for log"""
def test_log(self):
"""Check log output"""
code = self.cmd.log()
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_OUTPUT)
def test_log_uuid(self):
"""Check log using a uuid"""
code = self.cmd.log('John Doe')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_UUID_OUTPUT)
def test_log_organization(self):
"""Check log using a organization"""
code = self.cmd.log(organization='Bitergia')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_ORG_OUTPUT)
def test_log_period(self):
"""Check log using a time period"""
code = self.cmd.log(from_date=datetime.datetime(1990, 1, 1),
to_date=datetime.datetime(2005, 1, 1))
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_TIME_PERIOD_OUTPUT)
def test_period_ranges(self):
"""Check whether enrollments cannot be listed giving invalid period ranges"""
code = self.cmd.log('John Smith', 'Example',
datetime.datetime(2001, 1, 1),
datetime.datetime(1999, 1, 1))
self.assertEqual(code, CODE_VALUE_ERROR)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, LOG_INVALID_PERIOD_ERROR)
def test_not_found_uuid(self):
"""Check whether it raises an error when the uiid is not available"""
code = self.cmd.log(uuid='Jane Roe')
self.assertEqual(code, CODE_NOT_FOUND_ERROR)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, LOG_UUID_NOT_FOUND_ERROR)
def test_not_found_organization(self):
"""Check whether it raises an error when the organization is not available"""
code = self.cmd.log(organization='LibreSoft')
self.assertEqual(code, CODE_NOT_FOUND_ERROR)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, LOG_ORG_NOT_FOUND_ERROR)
def test_empty_registry(self):
"""Check output when the registry is empty"""
# Delete the contents of the database
self.db.clear()
code = self.cmd.log()
self.assertEqual(code, CMD_SUCCESS)
output = sys.stderr.getvalue().strip('\n')
self.assertEqual(output, LOG_EMPTY_OUTPUT)
if __name__ == "__main__":
unittest.main(buffer=True, exit=False)
| gpl-3.0 | 3,161,797,829,543,985,000 | 35.263158 | 110 | 0.631126 | false |
sbidoul/buildbot | master/buildbot/www/oauth2.py | 1 | 9591 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.moves.urllib.parse import parse_qs
from future.moves.urllib.parse import urlencode
from future.utils import iteritems
from future.utils import string_types
import json
from posixpath import join
import requests
from twisted.internet import defer
from twisted.internet import threads
from buildbot.www import auth
from buildbot.www import resource
class OAuth2LoginResource(auth.LoginResource):
# disable reconfigResource calls
needsReconfig = False
def __init__(self, master, _auth):
auth.LoginResource.__init__(self, master)
self.auth = _auth
def render_POST(self, request):
return self.asyncRenderHelper(request, self.renderLogin)
@defer.inlineCallbacks
def renderLogin(self, request):
code = request.args.get(b"code", [""])[0]
token = request.args.get(b"token", [""])[0]
if not token and not code:
url = request.args.get("redirect", [None])[0]
url = yield self.auth.getLoginURL(url)
raise resource.Redirect(url)
else:
if not token:
details = yield self.auth.verifyCode(code)
else:
details = yield self.auth.acceptToken(token)
if self.auth.userInfoProvider is not None:
infos = yield self.auth.userInfoProvider.getUserInfo(details['username'])
details.update(infos)
session = request.getSession()
session.user_info = details
session.updateSession(request)
state = request.args.get("state", [""])[0]
if state:
for redirect in parse_qs(state).get('redirect', []):
raise resource.Redirect(self.auth.homeUri + "#" + redirect)
raise resource.Redirect(self.auth.homeUri)
class OAuth2Auth(auth.AuthBase):
name = 'oauth2'
getTokenUseAuthHeaders = False
authUri = None
tokenUri = None
grantType = 'authorization_code'
authUriAdditionalParams = {}
tokenUriAdditionalParams = {}
loginUri = None
homeUri = None
sslVerify = None
def __init__(self,
clientId, clientSecret, autologin=False, **kwargs):
auth.AuthBase.__init__(self, **kwargs)
self.clientId = clientId
self.clientSecret = clientSecret
self.autologin = autologin
def reconfigAuth(self, master, new_config):
self.master = master
self.loginUri = join(new_config.buildbotURL, "auth/login")
self.homeUri = new_config.buildbotURL
def getConfigDict(self):
return dict(name=self.name,
oauth2=True,
fa_icon=self.faIcon,
autologin=self.autologin
)
def getLoginResource(self):
return OAuth2LoginResource(self.master, self)
def getLoginURL(self, redirect_url):
"""
Returns the url to redirect the user to for user consent
"""
oauth_params = {'redirect_uri': self.loginUri,
'client_id': self.clientId, 'response_type': 'code'}
if redirect_url is not None:
oauth_params['state'] = urlencode(dict(redirect=redirect_url))
oauth_params.update(self.authUriAdditionalParams)
sorted_oauth_params = sorted(oauth_params.items(), key=lambda val: val[0])
return defer.succeed("%s?%s" % (self.authUri, urlencode(sorted_oauth_params)))
def createSessionFromToken(self, token):
s = requests.Session()
s.params = {'access_token': token['access_token']}
s.verify = self.sslVerify
return s
def get(self, session, path):
ret = session.get(self.resourceEndpoint + path)
return ret.json()
# If the user wants to authenticate directly with an access token they
# already have, go ahead and just directly accept an access_token from them.
def acceptToken(self, token):
def thd():
session = self.createSessionFromToken({'access_token': token})
return self.getUserInfoFromOAuthClient(session)
return threads.deferToThread(thd)
# based on https://github.com/maraujop/requests-oauth
# from Miguel Araujo, augmented to support header based clientSecret
# passing
def verifyCode(self, code):
# everything in deferToThread is not counted with trial --coverage :-(
def thd():
url = self.tokenUri
data = {'redirect_uri': self.loginUri, 'code': code,
'grant_type': self.grantType}
auth = None
if self.getTokenUseAuthHeaders:
auth = (self.clientId, self.clientSecret)
else:
data.update(
{'client_id': self.clientId, 'client_secret': self.clientSecret})
data.update(self.tokenUriAdditionalParams)
response = requests.post(
url, data=data, auth=auth, verify=self.sslVerify)
response.raise_for_status()
if isinstance(response.content, string_types):
try:
content = json.loads(response.content)
except ValueError:
content = parse_qs(response.content)
for k, v in iteritems(content):
content[k] = v[0]
else:
content = response.content
session = self.createSessionFromToken(content)
return self.getUserInfoFromOAuthClient(session)
return threads.deferToThread(thd)
def getUserInfoFromOAuthClient(self, c):
return {}
class GoogleAuth(OAuth2Auth):
name = "Google"
faIcon = "fa-google-plus"
resourceEndpoint = "https://www.googleapis.com/oauth2/v1"
authUri = 'https://accounts.google.com/o/oauth2/auth'
tokenUri = 'https://accounts.google.com/o/oauth2/token'
authUriAdditionalParams = dict(scope=" ".join([
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile'
]))
def getUserInfoFromOAuthClient(self, c):
data = self.get(c, '/userinfo')
return dict(full_name=data["name"],
username=data['email'].split("@")[0],
email=data["email"],
avatar_url=data["picture"])
class GitHubAuth(OAuth2Auth):
name = "GitHub"
faIcon = "fa-github"
authUri = 'https://github.com/login/oauth/authorize'
authUriAdditionalParams = {'scope': 'user:email read:org'}
tokenUri = 'https://github.com/login/oauth/access_token'
resourceEndpoint = 'https://api.github.com'
def getUserInfoFromOAuthClient(self, c):
user = self.get(c, '/user')
emails = self.get(c, '/user/emails')
for email in emails:
if email.get('primary', False):
user['email'] = email['email']
break
orgs = self.get(c, '/user/orgs')
return dict(full_name=user['name'],
email=user['email'],
username=user['login'],
groups=[org['login'] for org in orgs])
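# Illustrative configuration sketch (added comment, not part of the original
# module): one of these classes is typically instantiated in a buildmaster's
# master.cfg and passed as the ``auth`` entry of the www config; the client id
# and secret below are placeholders.
#
#   c['www'] = dict(port=8010,
#                   auth=GitHubAuth("clientid", "clientsecret"))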
class GitLabAuth(OAuth2Auth):
name = "GitLab"
faIcon = "fa-git"
def __init__(self, instanceUri, clientId, clientSecret, **kwargs):
uri = instanceUri.rstrip("/")
self.authUri = "%s/oauth/authorize" % uri
self.tokenUri = "%s/oauth/token" % uri
self.resourceEndpoint = "%s/api/v3" % uri
super(GitLabAuth, self).__init__(clientId, clientSecret, **kwargs)
def getUserInfoFromOAuthClient(self, c):
user = self.get(c, "/user")
groups = self.get(c, "/groups")
return dict(full_name=user["name"],
username=user["username"],
email=user["email"],
avatar_url=user["avatar_url"],
groups=[g["path"] for g in groups])
class BitbucketAuth(OAuth2Auth):
name = "Bitbucket"
faIcon = "fa-bitbucket"
authUri = 'https://bitbucket.org/site/oauth2/authorize'
tokenUri = 'https://bitbucket.org/site/oauth2/access_token'
resourceEndpoint = 'https://api.bitbucket.org/2.0'
def getUserInfoFromOAuthClient(self, c):
user = self.get(c, '/user')
emails = self.get(c, '/user/emails')
for email in emails["values"]:
if email.get('is_primary', False):
user['email'] = email['email']
break
orgs = self.get(c, '/teams?role=member')
return dict(full_name=user['display_name'],
email=user['email'],
username=user['username'],
groups=[org['username'] for org in orgs["values"]])
| gpl-2.0 | -19,559,649,487,698,830 | 36.759843 | 89 | 0.603587 | false |
nijx/hypertable | src/py/ThriftClient/client_test.py | 1 | 4079 | import sys
import time
from hypertable.thriftclient import *
from hyperthrift.gen.ttypes import *
try:
client = ThriftClient("localhost", 38080)
print "HQL examples"
try:
namespace = client.namespace_open("bad")
except:
print "Caught exception when tyring to open 'bad' namespace"
namespace = client.namespace_open("test")
res = client.hql_query(namespace, "show tables")
print res
res = client.hql_query(namespace, "select * from thrift_test")
print res
print "mutator examples";
mutator = client.mutator_open(namespace, "thrift_test", 0, 0);
client.mutator_set_cell(mutator, Cell(Key("py-k1", "col", None), "py-v1"))
client.mutator_flush(mutator);
client.mutator_close(mutator);
print "shared mutator examples";
mutate_spec = MutateSpec("test_py", 1000, 0);
client.shared_mutator_set_cell(namespace, "thrift_test", mutate_spec, Cell(Key("py-put-k1", "col", None), "py-put-v1"))
client.shared_mutator_refresh(namespace, "thrift_test", mutate_spec)
client.shared_mutator_set_cell(namespace, "thrift_test", mutate_spec, Cell(Key("py-put-k2", "col", None), "py-put-v2"))
time.sleep(2)
print "scanner examples";
scanner = client.scanner_open(namespace, "thrift_test",
ScanSpec(None, None, None, 1));
while True:
cells = client.scanner_get_cells(scanner)
if (len(cells) == 0):
break
print cells
client.scanner_close(scanner)
print "asynchronous api examples\n";
future = client.future_open(0);
mutator_async_1 = client.async_mutator_open(namespace, "thrift_test", future, 0);
mutator_async_2 = client.async_mutator_open(namespace, "thrift_test", future, 0);
client.async_mutator_set_cell(mutator_async_1, Cell(Key("py-k1","col", None), "py-v1-async"));
client.async_mutator_set_cell(mutator_async_2, Cell(Key("py-k1","col", None), "py-v2-async"));
client.async_mutator_flush(mutator_async_1);
client.async_mutator_flush(mutator_async_2);
num_results=0;
while True:
result = client.future_get_result(future, 0);
if(result.is_empty):
break
num_results+=1;
print result;
if (result.is_error or result.is_scan):
print "Unexpected result\n"
exit(1);
if (num_results>2):
print "Expected only 2 results\n"
exit(1)
if (num_results!=2):
print "Expected only 2 results\n"
exit(1)
if (client.future_is_cancelled(future) or client.future_is_full(future) or not (client.future_is_empty(future)) or client.future_has_outstanding(future)):
print "Future object in unexpected state"
exit(1)
client.async_mutator_close(mutator_async_1)
client.async_mutator_close(mutator_async_2)
color_scanner = client.async_scanner_open(namespace, "FruitColor", future, ScanSpec(None, None, None, 1));
location_scanner = client.async_scanner_open(namespace, "FruitLocation", future, ScanSpec(None, None, None, 1));
energy_scanner = client.async_scanner_open(namespace, "FruitEnergy", future, ScanSpec(None, None, None, 1));
expected_cells = 6;
num_cells = 0;
while True:
result = client.future_get_result(future, 0);
print result;
if (result.is_empty or result.is_error or not(result.is_scan) ):
print "Unexpected result\n"
exit(1);
for cell in result.cells:
print cell;
num_cells+=1;
if(num_cells >= 6):
client.future_cancel(future);
break;
if (not client.future_is_cancelled(future)):
print "Expected future ops to be cancelled\n"
exit(1)
print "regexp scanner example";
scanner = client.scanner_open(namespace, "thrift_test",
ScanSpec(None, None, None, 1, 0, None, None, ["col"], False,0, 0, "k", "v[24]"));
while True:
cells = client.scanner_get_cells(scanner)
if (len(cells) == 0):
break
print cells
client.scanner_close(scanner)
client.async_scanner_close(color_scanner);
client.async_scanner_close(location_scanner);
client.async_scanner_close(energy_scanner);
client.future_close(future);
client.namespace_close(namespace)
except:
print sys.exc_info()
raise
| gpl-3.0 | -526,401,585,103,323,140 | 32.162602 | 156 | 0.679333 | false |
earlbellinger/asteroseismology | grid/calibrate.py | 1 | 3590 | #### Calibrate a solar model
#### Author: Earl Bellinger ( [email protected] )
#### Stellar Ages & Galactic Evolution Group
#### Max-Planck-Institut fur Sonnensystemforschung
#### Department of Astronomy, Yale University
import numpy as np
import pandas as pd
from scipy import optimize
from os import path
from subprocess import Popen
from math import log10
Z_div_X_solar = 0.02293 # GS98 # 0.0245 # GN93 #
log10_Z_div_X_solar = np.log10(Z_div_X_solar)
constraint_names = ("log L", "log R", "Fe/H")
param_names = ("Y", "alpha", "Z")
param_init = [0.273449170177157, 1.83413390909832, 0.0197444964340224]
directory = 'calibrate_py'
print(directory)
def objective():
## minimize sum(log(model values / solar values)**2)
# searches in LOGS_MS subdirectory of the global 'directory' variable
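    # Sketch of the quantity being minimized (inferred from the code below):
    # for a calibrated solar model log(L/L_sun), log(R/R_sun) and [Fe/H]
    # should all be zero, so this returns
    #   log10( (log L)**2 + (log R)**2 + [Fe/H]**2 )
    # which the optimizer drives as low as possible.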
hstry_file = path.join(directory, 'LOGS_MS', 'history.data')
if (not path.exists(hstry_file)):
return np.inf
hstry = pd.read_table(hstry_file, header=0, skiprows=5, delimiter='\s+') #header=1,
mdl = hstry.loc[hstry.shape[0]-1] #hstry[nrow(hstry),]
# [Fe/H] = log10 ( Z / X / (Z/X)_Sun )
mdl_Fe_H = mdl['log_surf_cell_z']-np.log10(mdl['surface_h1'])-log10_Z_div_X_solar
mdl_vals = [mdl['log_L'], mdl['log_R'], mdl_Fe_H]
print("*** Model values")
print(constraint_names, mdl_vals)
print('L', 10**mdl['log_L'], 'R', 10**mdl['log_R'])
result = sum([ii**2 for ii in mdl_vals])
if np.isfinite(result):
return log10(result)
return 10**10
### SEARCH
iteration = 0
best_val = np.inf
best_param = param_init
def run(params):
global iteration
global best_val
global best_param
iteration = iteration + 1
print("**** iter:", iteration)
Y, alpha, Z = params
print(param_names, (Y, alpha, Z))
if (Y < 0.2 or Y > 0.4 or Z < 0 or Z > 0.04 or alpha < 1 or alpha > 3):
return 10**10
#if (Y < 0.23):
# Y = 0.23
#if (Y > 0.33):
# Y = 0.33
#if (Z < 0.01):
# Z = 0.01
#if (Z > 0.04):
# Z = 0.04
#if (alpha < 1):
# alpha = 1
#if (alpha > 3):
# alpha = 3
command = "./dispatch.sh" + \
' -Y ' + str(Y) + \
' -a ' + str(alpha) + \
' -o ' + '0' + \
' -Z ' + str(Z) + \
' -D ' + '1' + \
' -g ' + '1' + \
' -e ' + '0' + \
' -c ' + "4572000000" + \
' -d ' + directory
print(command)
#system(command)
process = Popen(command.split(), shell=False)
process.wait()
obj_val = objective()
print("**** objective value =", obj_val)
if (obj_val < best_val):
best_val = obj_val
print("*****", param_names, params)
best_param = params
print("***** New record!")
return obj_val
result = optimize.minimize(fun=run, x0=param_init, method='Nelder-Mead',
options={'disp': True,
'maxiter': 10000}) #,
#bounds=((0.25, 0.32), (1, 3), (0.012, 0.03)))
print("Optimization terminated. Saving best result")
Y, alpha, Z = result.x
command = "./dispatch.sh" + \
' -Y ' + str(Y) + \
' -a ' + str(alpha) + \
' -o ' + '0' + \
' -Z ' + str(Z) + \
' -D ' + '1' + \
' -g ' + '1' + \
' -e ' + '0' + \
' -c ' + "4572000000" + \
' -d ' + directory
print(command)
process = Popen(command.split(), shell=False)
process.wait()
print(result)
| gpl-2.0 | -8,822,781,440,106,951,000 | 26.72 | 88 | 0.51532 | false |
huajiahen/hotspot | backend/Busy/models.py | 1 | 1154 | # -*- coding:utf-8 -*-
from django.db.models import *
class Event(Model):
    content = CharField(u'content',max_length = 200)
    starttime = IntegerField(u'start time')
    endtime = IntegerField(u'end time')
    #longitude = DecimalField(u'longitude',max_digits = 18,decimal_places = 14)
    #latitude = DecimalField(u'latitude',max_digits = 18,decimal_places = 14)
    longitude = FloatField(u'longitude')
    latitude = FloatField(u'latitude')
    address = CharField(u'location',max_length = 100)
    hit = IntegerField(u'want-to-go count',default = 0)
class Emergency(Model):
    content = CharField(u'content',max_length = 100)
    #longitude = DecimalField(u'longitude',max_digits = 18,decimal_places = 14)
    #latitude = DecimalField(u'latitude',max_digits = 18,decimal_places = 14)
    longitude = FloatField(u'longitude')
    latitude = FloatField(u'latitude')
class Man(Model):
    user_id = CharField(u'user ID',max_length = 200)
    longitude = DecimalField(u'longitude',max_digits = 18,decimal_places = 14)
    latitude = DecimalField(u'latitude',max_digits = 18,decimal_places = 14)
    hadevent = BooleanField(u'whether participating in an event',default = False)
| mit | -7,604,758,913,650,735,000 | 38.407407 | 72 | 0.662594 | false |
cmaclell/py_plan | py_plan/problems/blocksworld.py | 1 | 3681 | from operator import ne
from py_search.utils import compare_searches
from py_search.uninformed import depth_first_search
from py_search.uninformed import breadth_first_search
from py_search.uninformed import iterative_deepening_search
from py_plan.total_order import StateSpacePlanningProblem
from py_plan.base import Operator
move = Operator('move',
[('on', '?b', '?x'),
('block', '?b'),
('block', '?x'),
('block', '?y'),
('block', '?other'),
('block', '?other2'),
('not', ('on', '?other', '?b')),
('not', ('on', '?other2', '?y')),
# ('clear', '?b'),
# ('clear', '?y'),
(ne, '?b', '?x'),
(ne, '?b', '?y'),
(ne, '?x', '?y')],
[('on', '?b', '?y'),
# ('clear', '?x'),
('not', ('on', '?b', '?x')),
# ('not', ('clear', '?y'))
])
move_from_table = Operator('move_from_table',
[('on', '?b', 'Table'),
('block', '?other'),
('block', '?other2'),
('not', ('on', '?other', '?b')),
('not', ('on', '?other2', '?y')),
# ('clear', '?b'),
# ('clear', '?y'),
('block', '?b'),
('block', '?y'),
(ne, '?b', '?y')],
[('on', '?b', '?y'),
('not', ('on', '?b', 'Table')),
# ('not', ('clear', '?y'))
])
move_to_table = Operator('move_to_table',
[('on', '?b', '?x'),
('block', '?b'),
('block', '?x'),
('block', '?other'),
('not', ('on', '?other', '?b')),
# ('clear', '?b'),
(ne, '?b', '?x')],
[('on', '?b', 'Table'),
# ('clear', '?x'),
('not', ('on', '?b', '?x'))])
if __name__ == "__main__":
start = [('on', 'A', 'Table'),
('on', 'B', 'Table'),
('on', 'C', 'A'),
('block', 'A'),
('block', 'B'),
('block', 'C'),
# ('clear', 'B'),
# ('clear', 'C')
]
goal = [('on', 'A', 'B'),
('on', 'B', 'C'),
('on', 'C', 'Table')]
# start = [('on', 'A', 'Table'),
# ('on', 'B', 'Table'),
# ('on', 'C', 'Table'),
# ('block', 'A'),
# ('block', 'B'),
# ('block', 'C'),
# ('clear', 'A'),
# ('clear', 'B'),
# ('clear', 'C')]
def progression(x):
return breadth_first_search(x, forward=True, backward=False)
def regression(x):
return breadth_first_search(x, forward=False, backward=True)
def bidirectional(x):
return breadth_first_search(x, forward=True, backward=True)
p = StateSpacePlanningProblem(start, goal, [move_from_table,
move_to_table])
# print(next(best_first_search(p)).state)
compare_searches([p], [progression,
regression, bidirectional,
# iterative_deepening_search
])
print(next(progression(p)).path())
print(next(regression(p)).path())
| mit | 1,218,365,807,427,348,500 | 33.401869 | 68 | 0.32627 | false |
ntymtsiv/tempest | tempest/thirdparty/boto/test_ec2_instance_run.py | 1 | 14269 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from boto import exception
from tempest.common.utils import data_utils
from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.test import attr
from tempest.test import skip_because
from tempest.thirdparty.boto.test import BotoTestCase
from tempest.thirdparty.boto.utils.s3 import s3_upload_dir
from tempest.thirdparty.boto.utils.wait import re_search_wait
from tempest.thirdparty.boto.utils.wait import state_wait
CONF = config.CONF
LOG = logging.getLogger(__name__)
class InstanceRunTest(BotoTestCase):
@classmethod
def setUpClass(cls):
super(InstanceRunTest, cls).setUpClass()
if not cls.conclusion['A_I_IMAGES_READY']:
raise cls.skipException("".join(("EC2 ", cls.__name__,
": requires ami/aki/ari manifest")))
cls.s3_client = cls.os.s3_client
cls.ec2_client = cls.os.ec2api_client
cls.zone = cls.ec2_client.get_good_zone()
cls.materials_path = CONF.boto.s3_materials_path
ami_manifest = CONF.boto.ami_manifest
aki_manifest = CONF.boto.aki_manifest
ari_manifest = CONF.boto.ari_manifest
cls.instance_type = CONF.boto.instance_type
cls.bucket_name = data_utils.rand_name("s3bucket-")
cls.keypair_name = data_utils.rand_name("keypair-")
cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
cls.keypair_name)
bucket = cls.s3_client.create_bucket(cls.bucket_name)
cls.addResourceCleanUp(cls.destroy_bucket,
cls.s3_client.connection_data,
cls.bucket_name)
s3_upload_dir(bucket, cls.materials_path)
cls.images = {"ami":
{"name": data_utils.rand_name("ami-name-"),
"location": cls.bucket_name + "/" + ami_manifest},
"aki":
{"name": data_utils.rand_name("aki-name-"),
"location": cls.bucket_name + "/" + aki_manifest},
"ari":
{"name": data_utils.rand_name("ari-name-"),
"location": cls.bucket_name + "/" + ari_manifest}}
for image in cls.images.itervalues():
image["image_id"] = cls.ec2_client.register_image(
name=image["name"],
image_location=image["location"])
cls.addResourceCleanUp(cls.ec2_client.deregister_image,
image["image_id"])
for image in cls.images.itervalues():
def _state():
retr = cls.ec2_client.get_image(image["image_id"])
return retr.state
state = state_wait(_state, "available")
if state != "available":
for _image in cls.images.itervalues():
cls.ec2_client.deregister_image(_image["image_id"])
raise exceptions.EC2RegisterImageException(image_id=
image["image_id"])
@attr(type='smoke')
def test_run_idempotent_instances(self):
# EC2 run instances idempotently
def _run_instance(client_token):
reservation = self.ec2_client.run_instances(
image_id=self.images["ami"]["image_id"],
kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type,
client_token=client_token)
rcuk = self.addResourceCleanUp(self.destroy_reservation,
reservation)
return (reservation, rcuk)
def _terminate_reservation(reservation, rcuk):
for instance in reservation.instances:
instance.terminate()
self.cancelResourceCleanUp(rcuk)
reservation_1, rcuk_1 = _run_instance('token_1')
reservation_2, rcuk_2 = _run_instance('token_2')
reservation_1a, rcuk_1a = _run_instance('token_1')
self.assertIsNotNone(reservation_1)
self.assertIsNotNone(reservation_2)
self.assertIsNotNone(reservation_1a)
# same reservation for token_1
self.assertEqual(reservation_1.id, reservation_1a.id)
# Cancel cleanup -- since it's a duplicate, it's
# handled by rcuk1
self.cancelResourceCleanUp(rcuk_1a)
_terminate_reservation(reservation_1, rcuk_1)
_terminate_reservation(reservation_2, rcuk_2)
reservation_3, rcuk_3 = _run_instance('token_1')
self.assertIsNotNone(reservation_3)
# make sure we don't get the old reservation back
self.assertNotEqual(reservation_1.id, reservation_3.id)
# clean up
_terminate_reservation(reservation_3, rcuk_3)
@attr(type='smoke')
def test_run_stop_terminate_instance(self):
# EC2 run, stop and terminate instance
image_ami = self.ec2_client.get_image(self.images["ami"]
["image_id"])
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type)
rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
for instance in reservation.instances:
LOG.info("state: %s", instance.state)
if instance.state != "running":
self.assertInstanceStateWait(instance, "running")
for instance in reservation.instances:
instance.stop()
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
for instance in reservation.instances:
instance.terminate()
self.cancelResourceCleanUp(rcuk)
@attr(type='smoke')
def test_run_stop_terminate_instance_with_tags(self):
# EC2 run, stop and terminate instance with tags
image_ami = self.ec2_client.get_image(self.images["ami"]
["image_id"])
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type)
rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
for instance in reservation.instances:
LOG.info("state: %s", instance.state)
if instance.state != "running":
self.assertInstanceStateWait(instance, "running")
instance.add_tag('key1', value='value1')
tags = self.ec2_client.get_all_tags()
self.assertEqual(tags[0].name, 'key1')
self.assertEqual(tags[0].value, 'value1')
tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
self.assertEqual(tags[0].name, 'key1')
self.assertEqual(tags[0].value, 'value1')
tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
self.assertEqual(tags[0].name, 'key1')
self.assertEqual(tags[0].value, 'value1')
tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
self.assertEqual(len(tags), 0, str(tags))
for instance in reservation.instances:
instance.remove_tag('key1', value='value1')
tags = self.ec2_client.get_all_tags()
self.assertEqual(len(tags), 0, str(tags))
for instance in reservation.instances:
instance.stop()
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
for instance in reservation.instances:
instance.terminate()
self.cancelResourceCleanUp(rcuk)
@skip_because(bug="1098891")
@attr(type='smoke')
def test_run_terminate_instance(self):
# EC2 run, terminate immediately
image_ami = self.ec2_client.get_image(self.images["ami"]
["image_id"])
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type)
for instance in reservation.instances:
instance.terminate()
try:
instance.update(validate=True)
except ValueError:
pass
except exception.EC2ResponseError as exc:
if self.ec2_error_code.\
client.InvalidInstanceID.NotFound.match(exc):
pass
else:
raise
else:
self.assertNotEqual(instance.state, "running")
# NOTE(afazekas): doctored test case,
# with normal validation it would fail
@skip_because(bug="1182679")
@attr(type='smoke')
def test_integration_1(self):
# EC2 1. integration test (not strict)
image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
sec_group_name = data_utils.rand_name("securitygroup-")
group_desc = sec_group_name + " security group description "
security_group = self.ec2_client.create_security_group(sec_group_name,
group_desc)
self.addResourceCleanUp(self.destroy_security_group_wait,
security_group)
self.assertTrue(
self.ec2_client.authorize_security_group(
sec_group_name,
ip_protocol="icmp",
cidr_ip="0.0.0.0/0",
from_port=-1,
to_port=-1))
self.assertTrue(
self.ec2_client.authorize_security_group(
sec_group_name,
ip_protocol="tcp",
cidr_ip="0.0.0.0/0",
from_port=22,
to_port=22))
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type,
key_name=self.keypair_name,
security_groups=(sec_group_name,))
self.addResourceCleanUp(self.destroy_reservation,
reservation)
volume = self.ec2_client.create_volume(1, self.zone)
self.addResourceCleanUp(self.destroy_volume_wait, volume)
instance = reservation.instances[0]
LOG.info("state: %s", instance.state)
if instance.state != "running":
self.assertInstanceStateWait(instance, "running")
address = self.ec2_client.allocate_address()
rcuk_a = self.addResourceCleanUp(address.delete)
self.assertTrue(address.associate(instance.id))
rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission ?
self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may be reported available before it actually is
ssh = RemoteClient(address.public_ip,
CONF.compute.ssh_user,
pkey=self.keypair.material)
text = data_utils.rand_name("Pattern text for console output -")
resp = ssh.write_to_console(text)
self.assertFalse(resp)
def _output():
output = instance.get_console_output()
return output.output
re_search_wait(_output, text)
part_lines = ssh.get_partitions().split('\n')
volume.attach(instance.id, "/dev/vdh")
def _volume_state():
volume.update(validate=True)
return volume.status
self.assertVolumeStatusWait(_volume_state, "in-use")
re_search_wait(_volume_state, "in-use")
        # NOTE(afazekas): Different hypervisor backends name the devices
        # differently, so for now we just test whether the partition number
        # increased/decreased
def _part_state():
current = ssh.get_partitions().split('\n')
if current > part_lines:
return 'INCREASE'
if current < part_lines:
return 'DECREASE'
return 'EQUAL'
state_wait(_part_state, 'INCREASE')
part_lines = ssh.get_partitions().split('\n')
# TODO(afazekas): Resource compare to the flavor settings
volume.detach()
self.assertVolumeStatusWait(_volume_state, "available")
re_search_wait(_volume_state, "available")
LOG.info("Volume %s state: %s", volume.id, volume.status)
state_wait(_part_state, 'DECREASE')
instance.stop()
address.disassociate()
self.assertAddressDissasociatedWait(address)
self.cancelResourceCleanUp(rcuk_da)
address.release()
self.assertAddressReleasedWait(address)
self.cancelResourceCleanUp(rcuk_a)
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
# TODO(afazekas): move steps from teardown to the test case
# TODO(afazekas): Snapshot/volume read/write test case
| apache-2.0 | -516,099,398,364,878,460 | 40.479651 | 78 | 0.583152 | false |
citrix-openstack-build/horizon | horizon/tables/base.py | 1 | 53167 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import logging
from operator import attrgetter # noqa
import sys
from django.conf import settings # noqa
from django.core import urlresolvers
from django import forms
from django.http import HttpResponse # noqa
from django import template
from django.template.defaultfilters import truncatechars # noqa
from django.template.loader import render_to_string # noqa
from django.utils.datastructures import SortedDict # noqa
from django.utils.html import escape # noqa
from django.utils import http
from django.utils.http import urlencode # noqa
from django.utils.safestring import mark_safe # noqa
from django.utils import termcolors
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon.tables.actions import FilterAction # noqa
from horizon.tables.actions import LinkAction # noqa
from horizon.utils import html
LOG = logging.getLogger(__name__)
PALETTE = termcolors.PALETTES[termcolors.DEFAULT_PALETTE]
STRING_SEPARATOR = "__"
class Column(html.HTMLElement):
""" A class which represents a single column in a :class:`.DataTable`.
.. attribute:: transform
A string or callable. If ``transform`` is a string, it should be the
name of the attribute on the underlying data class which
should be displayed in this column. If it is a callable, it
will be passed the current row's data at render-time and should
return the contents of the cell. Required.
.. attribute:: verbose_name
The name for this column which should be used for display purposes.
Defaults to the value of ``transform`` with the first letter
of each word capitalized.
.. attribute:: sortable
Boolean to determine whether this column should be sortable or not.
Defaults to ``True``.
.. attribute:: hidden
Boolean to determine whether or not this column should be displayed
when rendering the table. Default: ``False``.
.. attribute:: link
A string or callable which returns a URL which will be wrapped around
this column's text as a link.
.. attribute:: allowed_data_types
A list of data types for which the link should be created.
Default is an empty list (``[]``).
When the list is empty and the ``link`` attribute is not None, all the
rows under this column will be links.
.. attribute:: status
Boolean designating whether or not this column represents a status
(i.e. "enabled/disabled", "up/down", "active/inactive").
Default: ``False``.
.. attribute:: status_choices
A tuple of tuples representing the possible data values for the
status column and their associated boolean equivalent. Positive
states should equate to ``True``, negative states should equate
to ``False``, and indeterminate states should be ``None``.
Values are compared in a case-insensitive manner.
Example (these are also the default values)::
status_choices = (
('enabled', True),
('true', True)
('up', True),
('active', True),
('yes', True),
('on', True),
('none', None),
('unknown', None),
('', None),
('disabled', False),
('down', False),
('false', False),
('inactive', False),
('no', False),
('off', False),
)
.. attribute:: display_choices
A tuple of tuples representing the possible values to substitute
the data when displayed in the column cell.
.. attribute:: empty_value
A string or callable to be used for cells which have no data.
Defaults to the string ``"-"``.
.. attribute:: summation
A string containing the name of a summation method to be used in
the generation of a summary row for this column. By default the
options are ``"sum"`` or ``"average"``, which behave as expected.
Optional.
.. attribute:: filters
A list of functions (often template filters) to be applied to the
value of the data for this column prior to output. This is effectively
a shortcut for writing a custom ``transform`` function in simple cases.
.. attribute:: classes
An iterable of CSS classes which should be added to this column.
Example: ``classes=('foo', 'bar')``.
.. attribute:: attrs
A dict of HTML attribute strings which should be added to this column.
Example: ``attrs={"data-foo": "bar"}``.
.. attribute:: truncate
An integer for the maximum length of the string in this column. If the
data in this column is larger than the supplied number, the data for
this column will be truncated and an ellipsis will be appended to the
truncated data.
Defaults to ``None``.
.. attribute:: link_classes
An iterable of CSS classes which will be added when the column's text
is displayed as a link.
Example: ``classes=('link-foo', 'link-bar')``.
Defaults to ``None``.
.. attribute:: wrap_list
Boolean value indicating whether the contents of this cell should be
wrapped in a ``<ul></ul>`` tag. Useful in conjunction with Django's
``unordered_list`` template filter. Defaults to ``False``.
"""
summation_methods = {
"sum": sum,
"average": lambda data: sum(data, 0.0) / len(data)
}
# Used to retain order when instantiating columns on a table
creation_counter = 0
transform = None
name = None
verbose_name = None
status_choices = (
('enabled', True),
('true', True),
('up', True),
('yes', True),
('active', True),
('on', True),
('none', None),
('unknown', None),
('', None),
('disabled', False),
('down', False),
('false', False),
('inactive', False),
('no', False),
('off', False),
)
def __init__(self, transform, verbose_name=None, sortable=True,
link=None, allowed_data_types=[], hidden=False, attrs=None,
status=False, status_choices=None, display_choices=None,
empty_value=None, filters=None, classes=None, summation=None,
auto=None, truncate=None, link_classes=None, wrap_list=False):
self.classes = list(classes or getattr(self, "classes", []))
super(Column, self).__init__()
self.attrs.update(attrs or {})
if callable(transform):
self.transform = transform
self.name = transform.__name__
else:
self.transform = unicode(transform)
self.name = self.transform
# Empty string is a valid value for verbose_name
        if verbose_name is None:
            verbose_name = self.transform.title()
self.auto = auto
self.sortable = sortable
self.verbose_name = verbose_name
self.link = link
self.allowed_data_types = allowed_data_types
self.hidden = hidden
self.status = status
self.empty_value = empty_value or '-'
self.filters = filters or []
self.truncate = truncate
self.link_classes = link_classes or []
self.wrap_list = wrap_list
if status_choices:
self.status_choices = status_choices
self.display_choices = display_choices
if summation is not None and summation not in self.summation_methods:
raise ValueError("Summation method %s must be one of %s."
% (summation,
", ".join(self.summation_methods.keys())))
self.summation = summation
self.creation_counter = Column.creation_counter
Column.creation_counter += 1
if self.sortable and not self.auto:
self.classes.append("sortable")
if self.hidden:
self.classes.append("hide")
if self.link is not None:
self.classes.append('anchor')
def __unicode__(self):
return unicode(self.verbose_name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.name)
def get_raw_data(self, datum):
"""
Returns the raw data for this column, before any filters or formatting
are applied to it. This is useful when doing calculations on data in
the table.
"""
# Callable transformations
if callable(self.transform):
data = self.transform(datum)
# Basic object lookups
elif hasattr(datum, self.transform):
data = getattr(datum, self.transform, None)
# Dict lookups
elif isinstance(datum, collections.Iterable) and \
self.transform in datum:
data = datum.get(self.transform)
else:
if settings.DEBUG:
msg = _("The attribute %(attr)s doesn't exist on "
"%(obj)s.") % {'attr': self.transform, 'obj': datum}
msg = termcolors.colorize(msg, **PALETTE['ERROR'])
LOG.warning(msg)
data = None
return data
def get_data(self, datum):
"""
Returns the final display data for this column from the given inputs.
The return value will be either the attribute specified for this column
        or the return value of the :attr:`~horizon.tables.Column.transform`
method for this column.
"""
datum_id = self.table.get_object_id(datum)
if datum_id in self.table._data_cache[self]:
return self.table._data_cache[self][datum_id]
data = self.get_raw_data(datum)
display_value = None
if self.display_choices:
display_value = [display for (value, display) in
self.display_choices
if value.lower() == (data or '').lower()]
if display_value:
data = display_value[0]
else:
for filter_func in self.filters:
data = filter_func(data)
if data and self.truncate:
data = truncatechars(data, self.truncate)
self.table._data_cache[self][datum_id] = data
return self.table._data_cache[self][datum_id]
def get_link_url(self, datum):
""" Returns the final value for the column's ``link`` property.
If ``allowed_data_types`` of this column is not empty and the datum
has an assigned type, check if the datum's type is in the
``allowed_data_types`` list. If not, the datum won't be displayed
as a link.
If ``link`` is a callable, it will be passed the current data object
and should return a URL. Otherwise ``get_link_url`` will attempt to
call ``reverse`` on ``link`` with the object's id as a parameter.
Failing that, it will simply return the value of ``link``.
"""
if self.allowed_data_types:
data_type_name = self.table._meta.data_type_name
data_type = getattr(datum, data_type_name, None)
if data_type and (data_type not in self.allowed_data_types):
return None
obj_id = self.table.get_object_id(datum)
if callable(self.link):
return self.link(datum)
try:
return urlresolvers.reverse(self.link, args=(obj_id,))
except urlresolvers.NoReverseMatch:
return self.link
def get_summation(self):
"""
Returns the summary value for the data in this column if a
valid summation method is specified for it. Otherwise returns ``None``.
"""
if self.summation not in self.summation_methods:
return None
summation_function = self.summation_methods[self.summation]
data = [self.get_raw_data(datum) for datum in self.table.data]
data = filter(lambda datum: datum is not None, data)
if len(data):
summation = summation_function(data)
for filter_func in self.filters:
summation = filter_func(summation)
return summation
else:
return None
class Row(html.HTMLElement):
""" Represents a row in the table.
When iterated, the ``Row`` instance will yield each of its cells.
Rows are capable of AJAX updating, with a little added work:
The ``ajax`` property needs to be set to ``True``, and
subclasses need to define a ``get_data`` method which returns a data
object appropriate for consumption by the table (effectively the "get"
lookup versus the table's "list" lookup).
The automatic update interval is configurable by setting the key
``ajax_poll_interval`` in the ``HORIZON_CONFIG`` dictionary.
Default: ``2500`` (measured in milliseconds).
.. attribute:: table
The table which this row belongs to.
.. attribute:: datum
The data object which this row represents.
.. attribute:: id
A string uniquely representing this row composed of the table name
and the row data object's identifier.
.. attribute:: cells
The cells belonging to this row stored in a ``SortedDict`` object.
This attribute is populated during instantiation.
.. attribute:: status
Boolean value representing the status of this row calculated from
the values of the table's ``status_columns`` if they are set.
.. attribute:: status_class
Returns a css class for the status of the row based on ``status``.
.. attribute:: ajax
Boolean value to determine whether ajax updating for this row is
enabled.
.. attribute:: ajax_action_name
String that is used for the query parameter key to request AJAX
updates. Generally you won't need to change this value.
Default: ``"row_update"``.
"""
ajax = False
ajax_action_name = "row_update"
def __init__(self, table, datum=None):
super(Row, self).__init__()
self.table = table
self.datum = datum
self.selected = False
if self.datum:
self.load_cells()
else:
self.id = None
self.cells = []
def load_cells(self, datum=None):
"""
Load the row's data (either provided at initialization or as an
        argument to this function), initialize all the cells contained
by this row, and set the appropriate row properties which require
the row's data to be determined.
This function is called automatically by
:meth:`~horizon.tables.Row.__init__` if the ``datum`` argument is
provided. However, by not providing the data during initialization
this function allows for the possibility of a two-step loading
pattern when you need a row instance but don't yet have the data
available.
"""
# Compile all the cells on instantiation.
table = self.table
if datum:
self.datum = datum
else:
datum = self.datum
cells = []
for column in table.columns.values():
if column.auto == "multi_select":
widget = forms.CheckboxInput(check_test=lambda value: False)
# Convert value to string to avoid accidental type conversion
data = widget.render('object_ids',
unicode(table.get_object_id(datum)))
table._data_cache[column][table.get_object_id(datum)] = data
elif column.auto == "actions":
data = table.render_row_actions(datum)
table._data_cache[column][table.get_object_id(datum)] = data
else:
data = column.get_data(datum)
cell = Cell(datum, data, column, self)
cells.append((column.name or column.auto, cell))
self.cells = SortedDict(cells)
if self.ajax:
interval = conf.HORIZON_CONFIG['ajax_poll_interval']
self.attrs['data-update-interval'] = interval
self.attrs['data-update-url'] = self.get_ajax_update_url()
self.classes.append("ajax-update")
# Add the row's status class and id to the attributes to be rendered.
self.classes.append(self.status_class)
id_vals = {"table": self.table.name,
"sep": STRING_SEPARATOR,
"id": table.get_object_id(datum)}
self.id = "%(table)s%(sep)srow%(sep)s%(id)s" % id_vals
self.attrs['id'] = self.id
# Add the row's display name if available
display_name = table.get_object_display(datum)
if display_name:
self.attrs['data-display'] = escape(display_name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.id)
def __iter__(self):
return iter(self.cells.values())
@property
def status(self):
column_names = self.table._meta.status_columns
if column_names:
statuses = dict([(column_name, self.cells[column_name].status) for
column_name in column_names])
return self.table.calculate_row_status(statuses)
@property
def status_class(self):
column_names = self.table._meta.status_columns
if column_names:
return self.table.get_row_status_class(self.status)
else:
return ''
def render(self):
return render_to_string("horizon/common/_data_table_row.html",
{"row": self})
def get_cells(self):
""" Returns the bound cells for this row in order. """
return self.cells.values()
def get_ajax_update_url(self):
table_url = self.table.get_absolute_url()
params = urlencode({"table": self.table.name,
"action": self.ajax_action_name,
"obj_id": self.table.get_object_id(self.datum)})
return "%s?%s" % (table_url, params)
def get_data(self, request, obj_id):
"""
Fetches the updated data for the row based on the object id
passed in. Must be implemented by a subclass to allow AJAX updating.
"""
raise NotImplementedError("You must define a get_data method on %s"
% self.__class__.__name__)
class Cell(html.HTMLElement):
""" Represents a single cell in the table. """
def __init__(self, datum, data, column, row, attrs=None, classes=None):
self.classes = classes or getattr(self, "classes", [])
super(Cell, self).__init__()
self.attrs.update(attrs or {})
self.datum = datum
self.data = data
self.column = column
self.row = row
self.wrap_list = column.wrap_list
def __repr__(self):
return '<%s: %s, %s>' % (self.__class__.__name__,
self.column.name,
self.row.id)
@property
def value(self):
"""
Returns a formatted version of the data for final output.
This takes into consideration the
:attr:`~horizon.tables.Column.link`` and
:attr:`~horizon.tables.Column.empty_value`
attributes.
"""
try:
data = self.column.get_data(self.datum)
if data is None:
if callable(self.column.empty_value):
data = self.column.empty_value(self.datum)
else:
data = self.column.empty_value
except Exception:
data = None
exc_info = sys.exc_info()
raise template.TemplateSyntaxError, exc_info[1], exc_info[2]
if self.url:
link_classes = ' '.join(self.column.link_classes)
# Escape the data inside while allowing our HTML to render
data = mark_safe('<a href="%s" class="%s">%s</a>' %
(self.url, link_classes, escape(data)))
return data
@property
def url(self):
if self.column.link:
url = self.column.get_link_url(self.datum)
if url:
return url
else:
return None
@property
def status(self):
""" Gets the status for the column based on the cell's data. """
# Deal with status column mechanics based in this cell's data
if hasattr(self, '_status'):
return self._status
if self.column.status or \
self.column.name in self.column.table._meta.status_columns:
#returns the first matching status found
data_value_lower = unicode(self.data).lower()
for status_name, status_value in self.column.status_choices:
if unicode(status_name).lower() == data_value_lower:
self._status = status_value
return self._status
self._status = None
return self._status
def get_status_class(self, status):
""" Returns a css class name determined by the status value. """
if status is True:
return "status_up"
elif status is False:
return "status_down"
else:
return "status_unknown"
def get_default_classes(self):
""" Returns a flattened string of the cell's CSS classes. """
if not self.url:
self.column.classes = [cls for cls in self.column.classes
if cls != "anchor"]
column_class_string = self.column.get_final_attrs().get('class', "")
classes = set(column_class_string.split(" "))
if self.column.status:
classes.add(self.get_status_class(self.status))
return list(classes)
class DataTableOptions(object):
""" Contains options for :class:`.DataTable` objects.
.. attribute:: name
A short name or slug for the table.
.. attribute:: verbose_name
A more verbose name for the table meant for display purposes.
.. attribute:: columns
A list of column objects or column names. Controls ordering/display
of the columns in the table.
.. attribute:: table_actions
A list of action classes derived from the
:class:`~horizon.tables.Action` class. These actions will handle tasks
such as bulk deletion, etc. for multiple objects at once.
.. attribute:: row_actions
A list similar to ``table_actions`` except tailored to appear for
each row. These actions act on a single object at a time.
.. attribute:: actions_column
Boolean value to control rendering of an additional column containing
the various actions for each row. Defaults to ``True`` if any actions
are specified in the ``row_actions`` option.
.. attribute:: multi_select
Boolean value to control rendering of an extra column with checkboxes
for selecting multiple objects in the table. Defaults to ``True`` if
any actions are specified in the ``table_actions`` option.
.. attribute:: filter
Boolean value to control the display of the "filter" search box
in the table actions. By default it checks whether or not an instance
of :class:`.FilterAction` is in :attr:`.table_actions`.
.. attribute:: template
String containing the template which should be used to render the
table. Defaults to ``"horizon/common/_data_table.html"``.
.. attribute:: context_var_name
The name of the context variable which will contain the table when
it is rendered. Defaults to ``"table"``.
.. attribute:: pagination_param
The name of the query string parameter which will be used when
paginating this table. When using multiple tables in a single
view this will need to be changed to differentiate between the
tables. Default: ``"marker"``.
.. attribute:: status_columns
A list or tuple of column names which represents the "state"
of the data object being represented.
If ``status_columns`` is set, when the rows are rendered the value
of this column will be used to add an extra class to the row in
the form of ``"status_up"`` or ``"status_down"`` for that row's
data.
The row status is used by other Horizon components to trigger tasks
such as dynamic AJAX updating.
.. attribute:: row_class
The class which should be used for rendering the rows of this table.
Optional. Default: :class:`~horizon.tables.Row`.
.. attribute:: column_class
The class which should be used for handling the columns of this table.
Optional. Default: :class:`~horizon.tables.Column`.
.. attribute:: mixed_data_type
A toggle to indicate if the table accepts two or more types of data.
        Optional. Default: ``False``
.. attribute:: data_types
        A list of data types that this table would accept. Defaults to an
        empty list, but if the attribute ``mixed_data_type`` is set to
        ``True``, then this list must have at least one element.
.. attribute:: data_type_name
The name of an attribute to assign to data passed to the table when it
        accepts mixed data. Default: ``"_table_data_type"``
.. attribute:: footer
Boolean to control whether or not to show the table's footer.
Default: ``True``.
.. attribute:: permissions
A list of permission names which this table requires in order to be
displayed. Defaults to an empty list (``[]``).
"""
def __init__(self, options):
self.name = getattr(options, 'name', self.__class__.__name__)
verbose_name = getattr(options, 'verbose_name', None) \
or self.name.title()
self.verbose_name = verbose_name
self.columns = getattr(options, 'columns', None)
self.status_columns = getattr(options, 'status_columns', [])
self.table_actions = getattr(options, 'table_actions', [])
self.row_actions = getattr(options, 'row_actions', [])
self.row_class = getattr(options, 'row_class', Row)
self.column_class = getattr(options, 'column_class', Column)
self.pagination_param = getattr(options, 'pagination_param', 'marker')
self.browser_table = getattr(options, 'browser_table', None)
self.footer = getattr(options, 'footer', True)
self.no_data_message = getattr(options,
"no_data_message",
_("No items to display."))
self.permissions = getattr(options, 'permissions', [])
# Set self.filter if we have any FilterActions
filter_actions = [action for action in self.table_actions if
issubclass(action, FilterAction)]
if len(filter_actions) > 1:
            raise NotImplementedError("Multiple filter actions are not "
"currently supported.")
self.filter = getattr(options, 'filter', len(filter_actions) > 0)
if len(filter_actions) == 1:
self._filter_action = filter_actions.pop()
else:
self._filter_action = None
self.template = getattr(options,
'template',
'horizon/common/_data_table.html')
self.row_actions_template = \
'horizon/common/_data_table_row_actions.html'
self.table_actions_template = \
'horizon/common/_data_table_table_actions.html'
self.context_var_name = unicode(getattr(options,
'context_var_name',
'table'))
self.actions_column = getattr(options,
'actions_column',
len(self.row_actions) > 0)
self.multi_select = getattr(options,
'multi_select',
len(self.table_actions) > 0)
# Set runtime table defaults; not configurable.
self.has_more_data = False
# Set mixed data type table attr
self.mixed_data_type = getattr(options, 'mixed_data_type', False)
self.data_types = getattr(options, 'data_types', [])
        # If data_types has two or more elements, set mixed_data_type
        # to True automatically.
if len(self.data_types) > 1:
self.mixed_data_type = True
        # However, if mixed_data_type is set to True manually and
        # data_types has fewer than two entries, raise an error.
if self.mixed_data_type and len(self.data_types) <= 1:
raise ValueError("If mixed_data_type is set to True in class %s, "
"data_types should has more than one types" %
self.name)
self.data_type_name = getattr(options,
'data_type_name',
"_table_data_type")
class DataTableMetaclass(type):
""" Metaclass to add options to DataTable class and collect columns. """
def __new__(mcs, name, bases, attrs):
# Process options from Meta
class_name = name
attrs["_meta"] = opts = DataTableOptions(attrs.get("Meta", None))
# Gather columns; this prevents the column from being an attribute
# on the DataTable class and avoids naming conflicts.
columns = []
for attr_name, obj in attrs.items():
if issubclass(type(obj), (opts.column_class, Column)):
column_instance = attrs.pop(attr_name)
column_instance.name = attr_name
column_instance.classes.append('normal_column')
columns.append((attr_name, column_instance))
columns.sort(key=lambda x: x[1].creation_counter)
# Iterate in reverse to preserve final order
for base in bases[::-1]:
if hasattr(base, 'base_columns'):
columns = base.base_columns.items() + columns
attrs['base_columns'] = SortedDict(columns)
# If the table is in a ResourceBrowser, the column number must meet
# these limits because of the width of the browser.
if opts.browser_table == "navigation" and len(columns) > 1:
raise ValueError("You can only assign one column to %s."
% class_name)
if opts.browser_table == "content" and len(columns) > 2:
raise ValueError("You can only assign two columns to %s."
% class_name)
if opts.columns:
# Remove any columns that weren't declared if we're being explicit
# NOTE: we're iterating a COPY of the list here!
for column_data in columns[:]:
if column_data[0] not in opts.columns:
columns.pop(columns.index(column_data))
# Re-order based on declared columns
columns.sort(key=lambda x: attrs['_meta'].columns.index(x[0]))
# Add in our auto-generated columns
if opts.multi_select and opts.browser_table != "navigation":
multi_select = opts.column_class("multi_select",
verbose_name="",
auto="multi_select")
multi_select.classes.append('multi_select_column')
columns.insert(0, ("multi_select", multi_select))
if opts.actions_column:
actions_column = opts.column_class("actions",
verbose_name=_("Actions"),
auto="actions")
actions_column.classes.append('actions_column')
columns.append(("actions", actions_column))
# Store this set of columns internally so we can copy them per-instance
attrs['_columns'] = SortedDict(columns)
# Gather and register actions for later access since we only want
# to instantiate them once.
# (list() call gives deterministic sort order, which sets don't have.)
actions = list(set(opts.row_actions) | set(opts.table_actions))
actions.sort(key=attrgetter('name'))
actions_dict = SortedDict([(action.name, action())
for action in actions])
attrs['base_actions'] = actions_dict
if opts._filter_action:
# Replace our filter action with the instantiated version
opts._filter_action = actions_dict[opts._filter_action.name]
# Create our new class!
return type.__new__(mcs, name, bases, attrs)
class DataTable(object):
""" A class which defines a table with all data and associated actions.
.. attribute:: name
String. Read-only access to the name specified in the
table's Meta options.
.. attribute:: multi_select
Boolean. Read-only access to whether or not this table
should display a column for multi-select checkboxes.
.. attribute:: data
Read-only access to the data this table represents.
.. attribute:: filtered_data
Read-only access to the data this table represents, filtered by
the :meth:`~horizon.tables.FilterAction.filter` method of the table's
:class:`~horizon.tables.FilterAction` class (if one is provided)
using the current request's query parameters.
"""
__metaclass__ = DataTableMetaclass
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
self.request = request
self.data = data
self.kwargs = kwargs
self._needs_form_wrapper = needs_form_wrapper
self._no_data_message = self._meta.no_data_message
self.breadcrumb = None
self.current_item_id = None
self.permissions = self._meta.permissions
# Create a new set
columns = []
for key, _column in self._columns.items():
column = copy.copy(_column)
column.table = self
columns.append((key, column))
self.columns = SortedDict(columns)
self._populate_data_cache()
# Associate these actions with this table
for action in self.base_actions.values():
action.table = self
self.needs_summary_row = any([col.summation
for col in self.columns.values()])
def __unicode__(self):
return unicode(self._meta.verbose_name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._meta.name)
@property
def name(self):
return self._meta.name
@property
def footer(self):
return self._meta.footer
@property
def multi_select(self):
return self._meta.multi_select
@property
def filtered_data(self):
if not hasattr(self, '_filtered_data'):
self._filtered_data = self.data
if self._meta.filter and self._meta._filter_action:
action = self._meta._filter_action
filter_string = self.get_filter_string()
request_method = self.request.method
needs_preloading = (not filter_string
and request_method == 'GET'
and action.needs_preloading)
valid_method = (request_method == action.method)
if (filter_string and valid_method) or needs_preloading:
if self._meta.mixed_data_type:
self._filtered_data = action.data_type_filter(self,
self.data,
filter_string)
else:
self._filtered_data = action.filter(self,
self.data,
filter_string)
return self._filtered_data
def get_filter_string(self):
filter_action = self._meta._filter_action
param_name = filter_action.get_param_name()
filter_string = self.request.POST.get(param_name, '')
return filter_string
def _populate_data_cache(self):
self._data_cache = {}
# Set up hash tables to store data points for each column
for column in self.get_columns():
self._data_cache[column] = {}
def _filter_action(self, action, request, datum=None):
try:
# Catch user errors in permission functions here
row_matched = True
if self._meta.mixed_data_type:
row_matched = action.data_type_matched(datum)
return action._allowed(request, datum) and row_matched
except Exception:
LOG.exception("Error while checking action permissions.")
return None
def is_browser_table(self):
if self._meta.browser_table:
return True
return False
def render(self):
""" Renders the table using the template from the table options. """
table_template = template.loader.get_template(self._meta.template)
extra_context = {self._meta.context_var_name: self}
context = template.RequestContext(self.request, extra_context)
return table_template.render(context)
def get_absolute_url(self):
""" Returns the canonical URL for this table.
This is used for the POST action attribute on the form element
wrapping the table. In many cases it is also useful for redirecting
after a successful action on the table.
For convenience it defaults to the value of
``request.get_full_path()`` with any query string stripped off,
e.g. the path at which the table was requested.
"""
return self.request.get_full_path().partition('?')[0]
def get_empty_message(self):
""" Returns the message to be displayed when there is no data. """
return self._no_data_message
def get_object_by_id(self, lookup):
"""
Returns the data object from the table's dataset which matches
the ``lookup`` parameter specified. An error will be raised if
the match is not a single data object.
We will convert the object id and ``lookup`` to unicode before
comparison.
Uses :meth:`~horizon.tables.DataTable.get_object_id` internally.
"""
if not isinstance(lookup, unicode):
lookup = unicode(str(lookup), 'utf-8')
matches = []
for datum in self.data:
obj_id = self.get_object_id(datum)
if not isinstance(obj_id, unicode):
obj_id = unicode(str(obj_id), 'utf-8')
if obj_id == lookup:
matches.append(datum)
if len(matches) > 1:
raise ValueError("Multiple matches were returned for that id: %s."
% matches)
if not matches:
raise exceptions.Http302(self.get_absolute_url(),
_('No match returned for the id "%s".')
% lookup)
return matches[0]
@property
def has_actions(self):
"""
Boolean. Indicates whether there are any available actions on this
table.
"""
if not self.base_actions:
return False
return any(self.get_table_actions()) or any(self._meta.row_actions)
@property
def needs_form_wrapper(self):
"""
        Boolean. Indicates whether this table should be rendered wrapped in
a ``<form>`` tag or not.
"""
# If needs_form_wrapper is explicitly set, defer to that.
if self._needs_form_wrapper is not None:
return self._needs_form_wrapper
# Otherwise calculate whether or not we need a form element.
return self.has_actions
def get_table_actions(self):
""" Returns a list of the action instances for this table. """
bound_actions = [self.base_actions[action.name] for
action in self._meta.table_actions]
return [action for action in bound_actions if
self._filter_action(action, self.request)]
def get_row_actions(self, datum):
""" Returns a list of the action instances for a specific row. """
bound_actions = []
for action in self._meta.row_actions:
# Copy to allow modifying properties per row
bound_action = copy.copy(self.base_actions[action.name])
bound_action.attrs = copy.copy(bound_action.attrs)
bound_action.datum = datum
# Remove disallowed actions.
if not self._filter_action(bound_action,
self.request,
datum):
continue
# Hook for modifying actions based on data. No-op by default.
bound_action.update(self.request, datum)
# Pre-create the URL for this link with appropriate parameters
if issubclass(bound_action.__class__, LinkAction):
bound_action.bound_url = bound_action.get_link_url(datum)
bound_actions.append(bound_action)
return bound_actions
def render_table_actions(self):
""" Renders the actions specified in ``Meta.table_actions``. """
template_path = self._meta.table_actions_template
table_actions_template = template.loader.get_template(template_path)
bound_actions = self.get_table_actions()
extra_context = {"table_actions": bound_actions}
if self._meta.filter and \
self._filter_action(self._meta._filter_action, self.request):
extra_context["filter"] = self._meta._filter_action
context = template.RequestContext(self.request, extra_context)
return table_actions_template.render(context)
def render_row_actions(self, datum):
"""
Renders the actions specified in ``Meta.row_actions`` using the
current row data. """
template_path = self._meta.row_actions_template
row_actions_template = template.loader.get_template(template_path)
bound_actions = self.get_row_actions(datum)
extra_context = {"row_actions": bound_actions,
"row_id": self.get_object_id(datum)}
context = template.RequestContext(self.request, extra_context)
return row_actions_template.render(context)
@staticmethod
def parse_action(action_string):
"""
Parses the ``action`` parameter (a string) sent back with the
POST data. By default this parses a string formatted as
``{{ table_name }}__{{ action_name }}__{{ row_id }}`` and returns
each of the pieces. The ``row_id`` is optional.
"""
if action_string:
bits = action_string.split(STRING_SEPARATOR)
bits.reverse()
table = bits.pop()
action = bits.pop()
try:
object_id = bits.pop()
except IndexError:
object_id = None
return table, action, object_id
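    # Example (illustrative; the table/action names are made up): a POST
    # value of "instances__terminate__42" parses to
    # ("instances", "terminate", "42"), while "instances__filter" parses to
    # ("instances", "filter", None) because the row id is optional.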
def take_action(self, action_name, obj_id=None, obj_ids=None):
"""
Locates the appropriate action and routes the object
data to it. The action should return an HTTP redirect
if successful, or a value which evaluates to ``False``
if unsuccessful.
"""
# See if we have a list of ids
obj_ids = obj_ids or self.request.POST.getlist('object_ids')
action = self.base_actions.get(action_name, None)
if not action or action.method != self.request.method:
# We either didn't get an action or we're being hacked. Goodbye.
return None
        # Meanwhile, back in Gotham...
if not action.requires_input or obj_id or obj_ids:
if obj_id:
obj_id = self.sanitize_id(obj_id)
if obj_ids:
obj_ids = [self.sanitize_id(i) for i in obj_ids]
# Single handling is easy
if not action.handles_multiple:
response = action.single(self, self.request, obj_id)
# Otherwise figure out what to pass along
else:
# Preference given to a specific id, since that implies
# the user selected an action for just one row.
if obj_id:
obj_ids = [obj_id]
response = action.multiple(self, self.request, obj_ids)
return response
elif action and action.requires_input and not (obj_id or obj_ids):
messages.info(self.request,
_("Please select a row before taking that action."))
return None
@classmethod
def check_handler(cls, request):
""" Determine whether the request should be handled by this table. """
if request.method == "POST" and "action" in request.POST:
table, action, obj_id = cls.parse_action(request.POST["action"])
elif "table" in request.GET and "action" in request.GET:
table = request.GET["table"]
action = request.GET["action"]
obj_id = request.GET.get("obj_id", None)
else:
table = action = obj_id = None
return table, action, obj_id
def maybe_preempt(self):
"""
Determine whether the request should be handled by a preemptive action
on this table or by an AJAX row update before loading any data.
"""
request = self.request
table_name, action_name, obj_id = self.check_handler(request)
if table_name == self.name:
# Handle AJAX row updating.
new_row = self._meta.row_class(self)
if new_row.ajax and new_row.ajax_action_name == action_name:
try:
datum = new_row.get_data(request, obj_id)
new_row.load_cells(datum)
error = False
except Exception:
datum = None
error = exceptions.handle(request, ignore=True)
if request.is_ajax():
if not error:
return HttpResponse(new_row.render())
else:
return HttpResponse(status=error.status_code)
preemptive_actions = [action for action in
self.base_actions.values() if action.preempt]
if action_name:
for action in preemptive_actions:
if action.name == action_name:
handled = self.take_action(action_name, obj_id)
if handled:
return handled
return None
def maybe_handle(self):
"""
Determine whether the request should be handled by any action on this
table after data has been loaded.
"""
request = self.request
table_name, action_name, obj_id = self.check_handler(request)
if table_name == self.name and action_name:
action_names = [action.name for action in
self.base_actions.values() if not action.preempt]
# do not run preemptive actions here
if action_name in action_names:
return self.take_action(action_name, obj_id)
return None
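    # Illustrative sketch only (MyTable and load_data are hypothetical): the
    # calling code typically constructs the table with the request, lets
    # maybe_handle() process any POSTed action (it returns a response such
    # as a redirect when an action was taken, or None), and otherwise
    # renders the table:
    #
    #     table = MyTable(request, data=load_data())
    #     handled = table.maybe_handle()
    #     if handled:
    #         return handled
    #     body = table.render()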
def sanitize_id(self, obj_id):
""" Override to modify an incoming obj_id to match existing
API data types or modify the format.
"""
return obj_id
def get_object_id(self, datum):
""" Returns the identifier for the object this row will represent.
By default this returns an ``id`` attribute on the given object,
but this can be overridden to return other values.
.. warning::
            Make sure that the value returned is a unique value for the id;
            otherwise rendering issues can occur.
"""
return datum.id
def get_object_display(self, datum):
""" Returns a display name that identifies this object.
By default, this returns a ``name`` attribute from the given object,
        but this can be overridden to return other values.
"""
if hasattr(datum, 'name'):
return datum.name
return None
def has_more_data(self):
"""
Returns a boolean value indicating whether there is more data
available to this table from the source (generally an API).
The method is largely meant for internal use, but if you want to
override it to provide custom behavior you can do so at your own risk.
"""
return self._meta.has_more_data
def get_marker(self):
"""
Returns the identifier for the last object in the current data set
for APIs that use marker/limit-based paging.
"""
return http.urlquote_plus(self.get_object_id(self.data[-1]))
def get_pagination_string(self):
""" Returns the query parameter string to paginate this table. """
return "=".join([self._meta.pagination_param, self.get_marker()])
def calculate_row_status(self, statuses):
"""
Returns a boolean value determining the overall row status
based on the dictionary of column name to status mappings passed in.
By default, it uses the following logic:
#. If any statuses are ``False``, return ``False``.
        #. If no statuses are ``False`` but any are ``None``, return ``None``.
#. If all statuses are ``True``, return ``True``.
This provides the greatest protection against false positives without
weighting any particular columns.
The ``statuses`` parameter is passed in as a dictionary mapping
column names to their statuses in order to allow this function to
be overridden in such a way as to weight one column's status over
another should that behavior be desired.
"""
values = statuses.values()
if any([status is False for status in values]):
return False
elif any([status is None for status in values]):
return None
else:
return True
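    # Example (illustrative): calculate_row_status({"status": True,
    # "task": True}) returns True; {"status": True, "task": None} returns
    # None; and any False value, e.g. {"status": False, "task": True},
    # returns False.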
def get_row_status_class(self, status):
"""
Returns a css class name determined by the status value. This class
name is used to indicate the status of the rows in the table if
any ``status_columns`` have been specified.
"""
if status is True:
return "status_up"
elif status is False:
return "status_down"
else:
return "status_unknown"
def get_columns(self):
""" Returns this table's columns including auto-generated ones."""
return self.columns.values()
def get_rows(self):
""" Return the row data for this table broken out by columns. """
rows = []
try:
for datum in self.filtered_data:
row = self._meta.row_class(self, datum)
if self.get_object_id(datum) == self.current_item_id:
self.selected = True
row.classes.append('current_selected')
rows.append(row)
except Exception:
# Exceptions can be swallowed at the template level here,
# re-raising as a TemplateSyntaxError makes them visible.
LOG.exception("Error while rendering table rows.")
exc_info = sys.exc_info()
raise template.TemplateSyntaxError, exc_info[1], exc_info[2]
return rows
| apache-2.0 | 670,515,343,822,811,100 | 37.779723 | 79 | 0.584479 | false |
jromang/retina-old | distinclude/spyderlib/interpreter.py | 1 | 11927 | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Shell Interpreter"""
import sys
import atexit
import threading
import ctypes
import os
import re
import os.path as osp
import pydoc
from subprocess import Popen, PIPE
from code import InteractiveConsole
# Local imports:
from spyderlib.utils.dochelpers import isdefined
from spyderlib.utils import encoding
# Force Python to search modules in the current directory first:
sys.path.insert(0, '')
def guess_filename(filename):
"""Guess filename"""
if osp.isfile(filename):
return filename
if not filename.endswith('.py'):
filename += '.py'
for path in [os.getcwdu()]+sys.path:
fname = osp.join(path, filename)
if osp.isfile(fname):
return fname
elif osp.isfile(fname+'.py'):
return fname+'.py'
elif osp.isfile(fname+'.pyw'):
return fname+'.pyw'
return filename
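# Example (illustrative; "myscript" is a hypothetical name):
# guess_filename("myscript") appends ".py" when the extension is missing and
# then looks for the file in the current working directory and every entry of
# sys.path (also trying extra ".py"/".pyw" suffixes); the first existing path
# is returned, otherwise the amended name is returned unchanged.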
class Interpreter(InteractiveConsole, threading.Thread):
"""Interpreter, executed in a separate thread"""
p1 = ">>> "
p2 = "... "
def __init__(self, namespace=None, exitfunc=None,
Output=None, WidgetProxy=None, debug=False):
"""
        namespace: locals sent to the InteractiveConsole object
commands: list of commands executed at startup
"""
InteractiveConsole.__init__(self, namespace)
threading.Thread.__init__(self)
self._id = None
self.exit_flag = False
self.debug = debug
# Execution Status
self.more = False
if exitfunc is not None:
atexit.register(exitfunc)
self.namespace = self.locals
self.namespace['__name__'] = '__main__'
self.namespace['execfile'] = self.execfile
self.namespace['runfile'] = self.runfile
self.namespace['help'] = self.help_replacement
# Capture all interactive input/output
self.initial_stdout = sys.stdout
self.initial_stderr = sys.stderr
self.initial_stdin = sys.stdin
# Create communication pipes
pr, pw = os.pipe()
self.stdin_read = os.fdopen(pr, "r")
self.stdin_write = os.fdopen(pw, "w", 0)
self.stdout_write = Output()
self.stderr_write = Output()
self.widget_proxy = WidgetProxy()
self.redirect_stds()
#------ Standard input/output
def redirect_stds(self):
"""Redirects stds"""
if not self.debug:
sys.stdout = self.stdout_write
sys.stderr = self.stderr_write
sys.stdin = self.stdin_read
def restore_stds(self):
"""Restore stds"""
if not self.debug:
sys.stdout = self.initial_stdout
sys.stderr = self.initial_stderr
sys.stdin = self.initial_stdin
def help_replacement(self, text=None, interactive=False):
"""For help() support"""
if text is not None and not interactive:
return pydoc.help(text)
elif text is None:
pyver = "%d.%d" % (sys.version_info[0], sys.version_info[1])
self.write("""
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://www.python.org/doc/tut/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
""" % pyver)
else:
text = text.strip()
try:
eval("pydoc.help(%s)" % text)
except (NameError, SyntaxError):
print "no Python documentation found for '%r'" % text
self.write(os.linesep)
self.widget_proxy.new_prompt("help> ")
inp = self.raw_input()
if inp.strip():
self.help_replacement(inp, interactive=True)
else:
self.write("""
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
""")
def run_command(self, cmd, new_prompt=True):
"""Run command in interpreter"""
if cmd == 'exit()':
self.exit_flag = True
self.write('\n')
return
# -- Special commands type I
# (transformed into commands executed in the interpreter)
# ? command
special_pattern = r"^%s (?:r\')?(?:u\')?\"?\'?([a-zA-Z0-9_\.]+)"
run_match = re.match(special_pattern % 'run', cmd)
help_match = re.match(r'^([a-zA-Z0-9_\.]+)\?$', cmd)
cd_match = re.match(r"^\!cd \"?\'?([a-zA-Z0-9_ \.]+)", cmd)
if help_match:
cmd = 'help(%s)' % help_match.group(1)
# run command
elif run_match:
filename = guess_filename(run_match.groups()[0])
cmd = 'runfile(r"%s", args=None)' % filename
# !cd system command
elif cd_match:
cmd = 'import os; os.chdir(r"%s")' % cd_match.groups()[0].strip()
# -- End of Special commands type I
# -- Special commands type II
# (don't need code execution in interpreter)
xedit_match = re.match(special_pattern % 'xedit', cmd)
edit_match = re.match(special_pattern % 'edit', cmd)
clear_match = re.match(r"^clear ([a-zA-Z0-9_, ]+)", cmd)
# (external) edit command
if xedit_match:
filename = guess_filename(xedit_match.groups()[0])
self.widget_proxy.edit(filename, external_editor=True)
# local edit command
elif edit_match:
filename = guess_filename(edit_match.groups()[0])
if osp.isfile(filename):
self.widget_proxy.edit(filename)
else:
self.stderr_write.write(
"No such file or directory: %s\n" % filename)
# remove reference (equivalent to MATLAB's clear command)
elif clear_match:
varnames = clear_match.groups()[0].replace(' ', '').split(',')
for varname in varnames:
try:
self.namespace.pop(varname)
except KeyError:
pass
# Execute command
elif cmd.startswith('!'):
# System ! command
pipe = Popen(cmd[1:], shell=True,
stdin=PIPE, stderr=PIPE, stdout=PIPE)
txt_out = encoding.transcode( pipe.stdout.read() )
txt_err = encoding.transcode( pipe.stderr.read().rstrip() )
if txt_err:
self.stderr_write.write(txt_err)
if txt_out:
self.stdout_write.write(txt_out)
self.stdout_write.write('\n')
self.more = False
# -- End of Special commands type II
else:
# Command executed in the interpreter
# self.widget_proxy.set_readonly(True)
self.more = self.push(cmd)
# self.widget_proxy.set_readonly(False)
if new_prompt:
self.widget_proxy.new_prompt(self.p2 if self.more else self.p1)
if not self.more:
self.resetbuffer()
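    # Examples (illustrative; file and object names are hypothetical) of how
    # run_command rewrites special input before execution:
    #   "run myscript"  -> 'runfile(r"<resolved path>", args=None)'
    #   "myobj?"        -> 'help(myobj)'
    #   "!cd mydir"     -> 'import os; os.chdir(r"mydir")'
    #   "clear a, b"    -> removes 'a' and 'b' from the namespace directly
    #   "!ls"           -> executed as a system command through Popen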
def run(self):
"""Wait for input and run it"""
while not self.exit_flag:
self.run_line()
def run_line(self):
line = self.stdin_read.readline()
if self.exit_flag:
return
# Remove last character which is always '\n':
self.run_command(line[:-1])
def get_thread_id(self):
"""Return thread id"""
if self._id is None:
for thread_id, obj in threading._active.items():
if obj is self:
self._id = thread_id
return self._id
def raise_keyboard_interrupt(self):
if self.isAlive():
ctypes.pythonapi.PyThreadState_SetAsyncExc(self.get_thread_id(),
ctypes.py_object(KeyboardInterrupt))
return True
else:
return False
def closing(self):
"""Actions to be done before restarting this interpreter"""
pass
def execfile(self, filename):
"""Exec filename"""
source = open(filename, 'r').read()
try:
try:
name = filename.encode('ascii')
except UnicodeEncodeError:
name = '<executed_script>'
code = compile(source, name, "exec")
except (OverflowError, SyntaxError):
InteractiveConsole.showsyntaxerror(self, filename)
else:
self.runcode(code)
def runfile(self, filename, args=None):
"""
Run filename
args: command line arguments (string)
"""
if args is not None and not isinstance(args, basestring):
raise TypeError("expected a character buffer object")
self.namespace['__file__'] = filename
sys.argv = [filename]
if args is not None:
for arg in args.split():
sys.argv.append(arg)
self.execfile(filename)
sys.argv = ['']
self.namespace.pop('__file__')
def eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
assert isinstance(text, (str, unicode))
try:
return eval(text, self.locals), True
except:
return None, False
def is_defined(self, objtxt, force_import=False):
"""Return True if object is defined"""
return isdefined(objtxt, force_import=force_import,
namespace=self.locals)
#===========================================================================
# InteractiveConsole API
#===========================================================================
def push(self, line):
"""
Push a line of source text to the interpreter
The line should not have a trailing newline; it may have internal
newlines. The line is appended to a buffer and the interpreter’s
runsource() method is called with the concatenated contents of the
buffer as source. If this indicates that the command was executed
or invalid, the buffer is reset; otherwise, the command is incomplete,
and the buffer is left as it was after the line was appended.
The return value is True if more input is required, False if the line
was dealt with in some way (this is the same as runsource()).
"""
return InteractiveConsole.push(self, line)
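    # Example (illustrative): push("if True:") returns True because the
    # block is incomplete; pushing the indented body and then an empty line
    # completes the statement, runs it, and returns False.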
def resetbuffer(self):
"""Remove any unhandled source text from the input buffer"""
InteractiveConsole.resetbuffer(self)
| gpl-3.0 | 547,717,842,301,860,860 | 35.26875 | 80 | 0.545035 | false |
danforthcenter/plantcv | tests/tests.py | 1 | 288502 | #!/usr/bin/env python
import pytest
import os
import shutil
import json
import numpy as np
import cv2
import sys
import pandas as pd
from plotnine import ggplot
from plantcv import plantcv as pcv
import plantcv.learn
import plantcv.parallel
import plantcv.utils
# Import matplotlib and use a null Template to block plotting to screen
# This will let us test debug = "plot"
import matplotlib
import matplotlib.pyplot as plt
import dask
from dask.distributed import Client
from skimage import img_as_ubyte
PARALLEL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parallel_data")
TEST_TMPDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".cache")
TEST_IMG_DIR = "images"
TEST_IMG_DIR2 = "images_w_date"
TEST_SNAPSHOT_DIR = "snapshots"
TEST_PIPELINE = os.path.join(PARALLEL_TEST_DATA, "plantcv-script.py")
META_FIELDS = {"imgtype": 0, "camera": 1, "frame": 2, "zoom": 3, "lifter": 4, "gain": 5, "exposure": 6, "id": 7}
VALID_META = {
# Camera settings
"camera": {
"label": "camera identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"imgtype": {
"label": "image type",
"datatype": "<class 'str'>",
"value": "none"
},
"zoom": {
"label": "camera zoom setting",
"datatype": "<class 'str'>",
"value": "none"
},
"exposure": {
"label": "camera exposure setting",
"datatype": "<class 'str'>",
"value": "none"
},
"gain": {
"label": "camera gain setting",
"datatype": "<class 'str'>",
"value": "none"
},
"frame": {
"label": "image series frame identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"lifter": {
"label": "imaging platform height setting",
"datatype": "<class 'str'>",
"value": "none"
},
# Date-Time
"timestamp": {
"label": "datetime of image",
"datatype": "<class 'datetime.datetime'>",
"value": None
},
# Sample attributes
"id": {
"label": "image identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"plantbarcode": {
"label": "plant barcode identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"treatment": {
"label": "treatment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"cartag": {
"label": "plant carrier identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Experiment attributes
"measurementlabel": {
"label": "experiment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Other
"other": {
"label": "other identifier",
"datatype": "<class 'str'>",
"value": "none"
}
}
METADATA_COPROCESS = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_VIS_ONLY = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_NIR_ONLY = {
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
# Set the temp directory for dask
dask.config.set(temporary_directory=TEST_TMPDIR)
# ##########################
# Tests setup function
# ##########################
def setup_function():
if not os.path.exists(TEST_TMPDIR):
os.mkdir(TEST_TMPDIR)
# ##############################
# Tests for the parallel subpackage
# ##############################
def test_plantcv_parallel_workflowconfig_save_config_file():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_save_config_file")
os.mkdir(cache_dir)
# Define output path/filename
template_file = os.path.join(cache_dir, "config.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Save template file
config.save_config(config_file=template_file)
assert os.path.exists(template_file)
def test_plantcv_parallel_workflowconfig_import_config_file():
# Define input path/filename
config_file = os.path.join(PARALLEL_TEST_DATA, "workflow_config_template.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# import config file
config.import_config(config_file=config_file)
assert config.cluster == "LocalCluster"
def test_plantcv_parallel_workflowconfig_validate_config():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_validate_config")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
# Validate config
assert config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_startdate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_startdate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.start_date = "2020-05-10"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_enddate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_enddate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.end_date = "2020-05-10"
config.timestampformat = "%Y%m%d"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_metadata_terms():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_metadata_terms")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set an incorrect metadata term
config.filename_metadata.append("invalid")
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_filename_metadata():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_filename_metadata")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Do not set required filename_metadata
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_cluster():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_cluster")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set invalid cluster type
config.cluster = "MyCluster"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_metadata_parser_snapshots():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_snapshots_coimg():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots_coimg", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "FAKE"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014"
config.end_date = "2014"
config.timestampformat = '%Y' # no date in filename so check date range and date_format are ignored
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
config.include_all_subdirs = False
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == expected
def test_plantcv_parallel_metadata_parser_multivalue_filter():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": ["VIS", "NIR"]}
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR, 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR, 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117779',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'
}
}
assert meta == expected
def test_plantcv_parallel_metadata_parser_multivalue_filter_nomatch():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": ["VIS", "PSII"]}
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR, 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'
}
}
assert meta == expected
def test_plantcv_parallel_metadata_parser_regex():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.delimiter = r'(VIS)_(SV)_(\d+)_(z1)_(h1)_(g0)_(e82)_(\d+)'
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
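# Note (assumption for illustration): when config.delimiter is a regular
# expression, its capture groups are expected to line up positionally with
# the terms in config.filename_metadata, so the eight groups in the pattern
# above map to imgtype, camera, frame, zoom, lifter, gain, exposure and id.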
def test_plantcv_parallel_metadata_parser_images_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR2)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "timestamp"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "NIR"}
config.start_date = "1970-01-01 00_00_00"
config.end_date = "1970-01-01 00_00_00"
config.timestampformat = "%Y-%m-%d %H_%M_%S"
config.imgformat = "jpg"
config.delimiter = r"(NIR)_(SV)_(\d)_(z1)_(h1)_(g0)_(e65)_(\d{4}-\d{2}-\d{2} \d{2}_\d{2}_\d{2})"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_no_default_dates():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_no_default_dates", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV", "id": "117770"}
config.start_date = None
config.end_date = None
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_workflowconfig_subdaily_timestampformat():
'''
    Timestamp formats that contain only hours and smaller units of time were
    failing whenever the script was run earlier in the day than the images
    were taken. This was fixed by defaulting end_date to 23-59-59 when the
    year-month-day is not part of the format.
'''
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR2)
config.json = os.path.join(TEST_IMG_DIR2, "test_plantcv_parallel_metadata_parser_subdaily_timestampformat", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "timestamp"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "NIR", "camera": "SV"}
config.start_date = None
config.end_date = None
config.timestampformat = "%H_%M_%S"
config.imgformat = "jpg"
config.delimiter = r"(NIR)_(SV)_(\d)_(z1)_(h1)_(g0)_(e65)_(\d{2}_\d{2}_\d{2})"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'NIR_SV_0_z1_h1_g0_e65_23_59_59.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images_w_date','NIR_SV_0_z1_h1_g0_e65_23_59_59.jpg'),
'imgtype': 'NIR',
'camera': 'SV',
'frame': '0',
'zoom': 'z1',
'lifter': 'h1',
'gain': 'g0',
'exposure': 'e65',
'timestamp': '23_59_59',
'measurementlabel': 'none',
'cartag':'none',
'id': 'none',
'treatment': 'none',
'plantbarcode': 'none',
'other': 'none'
}
}
def test_plantcv_parallel_check_date_range_wrongdateformat():
start_date = 10
end_date = 10
img_time = '2010-10-10'
with pytest.raises(SystemExit, match=r'does not match format'):
date_format = '%Y%m%d'
_ = plantcv.parallel.check_date_range(
start_date, end_date, img_time, date_format)
def test_plantcv_parallel_metadata_parser_snapshot_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshot_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_fail_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_fail_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"cartag": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_NIR_ONLY
def test_plantcv_parallel_metadata_parser_images_with_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_with_frame", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_metadata_parser_images_no_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame",
"output.json")
config.filename_metadata = ["imgtype", "camera", "X", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_metadata_parser_images_no_camera():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame", "output.json")
config.filename_metadata = ["imgtype", "X", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'none',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'none',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_job_builder_single_image():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_single_image")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
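    # job_builder is expected to emit one command-line argument list per image; the first job is
    # checked against the expected command assembled below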
jobs = plantcv.parallel.job_builder(meta=METADATA_VIS_ONLY, config=config)
image_name = list(METADATA_VIS_ONLY.keys())[0]
result_file = os.path.join(cache_dir, image_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', METADATA_VIS_ONLY[image_name]['path'], '--outdir',
cache_dir, '--result', result_file, '--writeimg', '--other', 'on']
    # The generated job must match the expected command exactly (same length, same arguments in order)
    assert len(jobs[0]) == len(expected) and all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_job_builder_coprocess():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_coprocess")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
config.coprocess = "NIR"
jobs = plantcv.parallel.job_builder(meta=METADATA_COPROCESS, config=config)
img_names = list(METADATA_COPROCESS.keys())
vis_name = img_names[0]
vis_path = METADATA_COPROCESS[vis_name]['path']
result_file = os.path.join(cache_dir, vis_name + '.txt')
nir_name = img_names[1]
coresult_file = os.path.join(cache_dir, nir_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', vis_path, '--outdir', cache_dir, '--result', result_file,
'--coresult', coresult_file, '--writeimg', '--other', 'on']
    # The generated job must match the expected command exactly (same length, same arguments in order)
    assert len(jobs[0]) == len(expected) and all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_multiprocess_create_dask_cluster_local():
client = plantcv.parallel.create_dask_cluster(cluster="LocalCluster", cluster_config={})
status = client.status
client.shutdown()
assert status == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster():
client = plantcv.parallel.create_dask_cluster(cluster="HTCondorCluster", cluster_config={"cores": 1,
"memory": "1GB",
"disk": "1GB"})
status = client.status
client.shutdown()
assert status == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster_invalid_cluster():
with pytest.raises(ValueError):
_ = plantcv.parallel.create_dask_cluster(cluster="Skynet", cluster_config={})
def test_plantcv_parallel_convert_datetime_to_unixtime():
unix_time = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m-%d")
assert unix_time == 0
def test_plantcv_parallel_convert_datetime_to_unixtime_bad_strptime():
with pytest.raises(SystemExit):
_ = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m")
def test_plantcv_parallel_multiprocess():
image_name = list(METADATA_VIS_ONLY.keys())[0]
image_path = os.path.join(METADATA_VIS_ONLY[image_name]['path'], image_name)
result_file = os.path.join(TEST_TMPDIR, image_name + '.txt')
jobs = [['python', TEST_PIPELINE, '--image', image_path, '--outdir', TEST_TMPDIR, '--result', result_file,
'--writeimg', '--other', 'on']]
# Create a dask LocalCluster client
client = Client(n_workers=1)
plantcv.parallel.multiprocess(jobs, client=client)
assert os.path.exists(result_file)
def test_plantcv_parallel_process_results():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results")
os.mkdir(cache_dir)
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
# Assert that the output JSON file matches the expected output JSON file
result_file = open(os.path.join(cache_dir, "appended_results.json"), "r")
results = json.load(result_file)
result_file.close()
expected_file = open(os.path.join(PARALLEL_TEST_DATA, "appended_results.json"))
expected = json.load(expected_file)
expected_file.close()
assert results == expected
def test_plantcv_parallel_process_results_new_output():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_new_output")
os.mkdir(cache_dir)
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'new_result.json'))
# Assert output matches expected values
result_file = open(os.path.join(cache_dir, "new_result.json"), "r")
results = json.load(result_file)
result_file.close()
expected_file = open(os.path.join(PARALLEL_TEST_DATA, "new_result.json"))
expected = json.load(expected_file)
expected_file.close()
assert results == expected
def test_plantcv_parallel_process_results_valid_json():
# Test when the file is a valid json file but doesn't contain expected keys
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(PARALLEL_TEST_DATA, "valid.json"))
def test_plantcv_parallel_process_results_invalid_json():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_invalid_json")
os.mkdir(cache_dir)
# Move the test data to the tmp directory
shutil.copytree(os.path.join(PARALLEL_TEST_DATA, "bad_results"), os.path.join(cache_dir, "bad_results"))
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(cache_dir, "bad_results"),
json_file=os.path.join(cache_dir, "bad_results", "invalid.txt"))
# ####################################################################################################################
# ########################################### PLANTCV MAIN PACKAGE ###################################################
matplotlib.use('Template')
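# 'Template' is a non-interactive matplotlib backend, so debug="plot" calls below do not open windows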
TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
HYPERSPECTRAL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "hyperspectral_data")
HYPERSPECTRAL_DATA = "darkReference"
HYPERSPECTRAL_WHITE = "darkReference_whiteReference"
HYPERSPECTRAL_DARK = "darkReference_darkReference"
HYPERSPECTRAL_HDR = "darkReference.hdr"
HYPERSPECTRAL_MASK = "darkReference_mask.png"
HYPERSPECTRAL_DATA_NO_DEFAULT = "darkReference2"
HYPERSPECTRAL_HDR_NO_DEFAULT = "darkReference2.hdr"
HYPERSPECTRAL_DATA_APPROX_PSEUDO = "darkReference3"
HYPERSPECTRAL_HDR_APPROX_PSEUDO = "darkReference3.hdr"
HYPERSPECTRAL_DATA_BAD_INTERLEAVE = "darkReference4"
HYPERSPECTRAL_HDR_BAD_INTERLEAVE = "darkReference4.hdr"
HYPERSPECTRAL_HDR_SMALL_RANGE = {'description': '{[HEADWALL Hyperspec III]}', 'samples': '800', 'lines': '1',
'bands': '978', 'header offset': '0', 'file type': 'ENVI Standard',
'interleave': 'bil', 'sensor type': 'Unknown', 'byte order': '0',
'default bands': '159,253,520', 'wavelength units': 'nm',
'wavelength': ['379.027', '379.663', '380.3', '380.936', '381.573', '382.209']}
FLUOR_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "photosynthesis_data")
FLUOR_IMG = "PSII_PSD_supopt_temp_btx623_22_rep1.DAT"
TEST_COLOR_DIM = (2056, 2454, 3)
TEST_GRAY_DIM = (2056, 2454)
TEST_BINARY_DIM = TEST_GRAY_DIM
TEST_INPUT_COLOR = "input_color_img.jpg"
TEST_INPUT_GRAY = "input_gray_img.jpg"
TEST_INPUT_GRAY_SMALL = "input_gray_img_small.jpg"
TEST_INPUT_BINARY = "input_binary_img.png"
# Image from http://www.libpng.org/pub/png/png-OwlAlpha.html
# This image may be used, edited and reproduced freely.
TEST_INPUT_RGBA = "input_rgba.png"
TEST_INPUT_BAYER = "bayer_img.png"
TEST_INPUT_ROI_CONTOUR = "input_roi_contour.npz"
TEST_INPUT_ROI_HIERARCHY = "input_roi_hierarchy.npz"
TEST_INPUT_CONTOURS = "input_contours.npz"
TEST_INPUT_OBJECT_CONTOURS = "input_object_contours.npz"
TEST_INPUT_OBJECT_HIERARCHY = "input_object_hierarchy.npz"
TEST_VIS = "VIS_SV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR = "NIR_SV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_VIS_TV = "VIS_TV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR_TV = "NIR_TV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_INPUT_MASK = "input_mask_binary.png"
TEST_INPUT_MASK_OOB = "mask_outbounds.png"
TEST_INPUT_MASK_RESIZE = "input_mask_resize.png"
TEST_INPUT_NIR_MASK = "input_nir.png"
TEST_INPUT_FDARK = "FLUO_TV_dark.png"
TEST_INPUT_FDARK_LARGE = "FLUO_TV_DARK_large"
TEST_INPUT_FMIN = "FLUO_TV_min.png"
TEST_INPUT_FMAX = "FLUO_TV_max.png"
TEST_INPUT_FMASK = "FLUO_TV_MASK.png"
TEST_INPUT_GREENMAG = "input_green-magenta.jpg"
TEST_INPUT_MULTI = "multi_ori_image.jpg"
TEST_INPUT_MULTI_MASK = "multi_ori_mask.jpg"
TEST_INPUT_MULTI_OBJECT = "roi_objects.npz"
TEST_INPUT_MULTI_CONTOUR = "multi_contours.npz"
TEST_INPUT_ClUSTER_CONTOUR = "clusters_i.npz"
TEST_INPUT_MULTI_HIERARCHY = "multi_hierarchy.npz"
TEST_INPUT_VISUALIZE_CONTOUR = "roi_objects_visualize.npz"
TEST_INPUT_VISUALIZE_HIERARCHY = "roi_obj_hierarchy_visualize.npz"
TEST_INPUT_VISUALIZE_CLUSTERS = "clusters_i_visualize.npz"
TEST_INPUT_VISUALIZE_BACKGROUND = "visualize_background_img.png"
TEST_INPUT_GENOTXT = "cluster_names.txt"
TEST_INPUT_GENOTXT_TOO_MANY = "cluster_names_too_many.txt"
TEST_INPUT_CROPPED = 'cropped_img.jpg'
TEST_INPUT_CROPPED_MASK = 'cropped-mask.png'
TEST_INPUT_MARKER = 'seed-image.jpg'
TEST_INPUT_SKELETON = 'input_skeleton.png'
TEST_INPUT_SKELETON_PRUNED = 'input_pruned_skeleton.png'
TEST_FOREGROUND = "TEST_FOREGROUND.jpg"
TEST_BACKGROUND = "TEST_BACKGROUND.jpg"
TEST_PDFS = "naive_bayes_pdfs.txt"
TEST_PDFS_BAD = "naive_bayes_pdfs_bad.txt"
TEST_VIS_SMALL = "setaria_small_vis.png"
TEST_MASK_SMALL = "setaria_small_mask.png"
TEST_VIS_COMP_CONTOUR = "setaria_composed_contours.npz"
TEST_ACUTE_RESULT = np.asarray([[[119, 285]], [[151, 280]], [[168, 267]], [[168, 262]], [[171, 261]], [[224, 269]],
[[246, 271]], [[260, 277]], [[141, 248]], [[183, 194]], [[188, 237]], [[173, 240]],
[[186, 260]], [[147, 244]], [[163, 246]], [[173, 268]], [[170, 272]], [[151, 320]],
[[195, 289]], [[228, 272]], [[210, 272]], [[209, 247]], [[210, 232]]])
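# Reference acute_vertex output for the small Setaria test image, used in the shape assertions below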
TEST_VIS_SMALL_PLANT = "setaria_small_plant_vis.png"
TEST_MASK_SMALL_PLANT = "setaria_small_plant_mask.png"
TEST_VIS_COMP_CONTOUR_SMALL_PLANT = "setaria_small_plant_composed_contours.npz"
TEST_SAMPLED_RGB_POINTS = "sampled_rgb_points.txt"
TEST_TARGET_IMG = "target_img.png"
TEST_TARGET_IMG_WITH_HEXAGON = "target_img_w_hexagon.png"
TEST_TARGET_IMG_TRIANGLE = "target_img copy.png"
TEST_SOURCE1_IMG = "source1_img.png"
TEST_SOURCE2_IMG = "source2_img.png"
TEST_TARGET_MASK = "mask_img.png"
TEST_TARGET_IMG_COLOR_CARD = "color_card_target.png"
TEST_SOURCE2_MASK = "mask2_img.png"
TEST_TARGET_MATRIX = "target_matrix.npz"
TEST_SOURCE1_MATRIX = "source1_matrix.npz"
TEST_SOURCE2_MATRIX = "source2_matrix.npz"
TEST_MATRIX_B1 = "matrix_b1.npz"
TEST_MATRIX_B2 = "matrix_b2.npz"
TEST_TRANSFORM1 = "transformation_matrix1.npz"
TEST_MATRIX_M1 = "matrix_m1.npz"
TEST_MATRIX_M2 = "matrix_m2.npz"
TEST_S1_CORRECTED = "source_corrected.png"
TEST_SKELETON_OBJECTS = "skeleton_objects.npz"
TEST_SKELETON_HIERARCHIES = "skeleton_hierarchies.npz"
TEST_THERMAL_ARRAY = "thermal_img.npz"
TEST_THERMAL_IMG_MASK = "thermal_img_mask.png"
TEST_INPUT_THERMAL_CSV = "FLIR2600.csv"
# TEST_BAD_MASK = "bad_mask_test.pkl"
# TEST_IM_BAD_NONE = "bad_mask_none.pkl"
# TEST_IM_BAD_BOTH = "bad_mask_both.pkl"
# TEST_IM_BAD_NAN = "bad_mask_nan.pkl"
# TEST_IM_BAD_INF = "bad_mask_inf.pkl"
PIXEL_VALUES = "pixel_inspector_rgb_values.txt"
# ##########################
# Tests for the main package
# ##########################
@pytest.mark.parametrize("debug", ["print", "plot"])
def test_plantcv_debug(debug, tmpdir):
from plantcv.plantcv._debug import _debug
# Create a test tmp directory
img_outdir = tmpdir.mkdir("sub")
pcv.params.debug = debug
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
_debug(visual=img, filename=os.path.join(img_outdir, TEST_INPUT_COLOR))
assert True
@pytest.mark.parametrize("datatype,value", [[list, []], [int, 2], [float, 2.2], [bool, True], [str, "2"], [dict, {}],
[tuple, ()], [None, None]])
def test_plantcv_outputs_add_observation(datatype, value):
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=datatype, value=value, label=[])
assert outputs.observations["default"]["test"]["value"] == value
def test_plantcv_outputs_add_observation_invalid_type():
# Create output instance
outputs = pcv.Outputs()
with pytest.raises(RuntimeError):
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=list, value=np.array([2]), label=[])
def test_plantcv_outputs_save_results_json_newfile(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.json")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_json_existing_file(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "data_results.txt")
shutil.copyfile(os.path.join(TEST_DATA, "data_results.txt"), outfile)
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_csv(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.csv")
testfile = os.path.join(TEST_DATA, "data_results.csv")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='string', trait='string variable', method='string', scale='none',
datatype=str, value="string", label="none")
outputs.add_observation(sample='default', variable='boolean', trait='boolean variable', method='boolean',
scale='none', datatype=bool, value=True, label="none")
outputs.add_observation(sample='default', variable='list', trait='list variable', method='list',
scale='none', datatype=list, value=[1, 2, 3], label=[1, 2, 3])
outputs.add_observation(sample='default', variable='tuple', trait='tuple variable', method='tuple',
scale='none', datatype=tuple, value=(1, 2), label=(1, 2))
outputs.add_observation(sample='default', variable='tuple_list', trait='list of tuples variable',
method='tuple_list', scale='none', datatype=list, value=[(1, 2), (3, 4)], label=[1, 2])
outputs.save_results(filename=outfile, outformat="csv")
with open(outfile, "r") as fp:
results = fp.read()
with open(testfile, "r") as fp:
test_results = fp.read()
assert results == test_results
def test_plantcv_acute():
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
_ = pcv.acute(obj=np.array(([[213, 190]], [[83, 61]], [[149, 246]])), win=84, thresh=192, mask=mask)
_ = pcv.acute(obj=np.array(([[3, 29]], [[31, 102]], [[161, 63]])), win=148, thresh=56, mask=mask)
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
# Test with debug = None
pcv.params.debug = None
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
homology_pts = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
    assert all(i == j for i, j in zip(np.shape(homology_pts), (29, 1, 2)))
def test_plantcv_acute_vertex():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img, label="prefix")
_ = pcv.acute_vertex(obj=[], win=5, thresh=15, sep=5, img=img)
_ = pcv.acute_vertex(obj=[], win=.01, thresh=.01, sep=1, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
# Test with debug = None
pcv.params.debug = None
acute = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(np.shape(acute), np.shape(TEST_ACUTE_RESULT)))
pcv.outputs.clear()
def test_plantcv_acute_vertex_bad_obj():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(result, [0, ("NA", "NA")]))
pcv.outputs.clear()
def test_plantcv_analyze_bound_horizontal():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_above_bound_only = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=300, label="prefix")
pcv.outputs.clear()
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=100)
_ = pcv.analyze_bound_horizontal(img=img_above_bound_only, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(pcv.outputs.observations["default"]) == 7
def test_plantcv_analyze_bound_horizontal_grayscale_image():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(np.shape(boundary_img1)) == 3
def test_plantcv_analyze_bound_horizontal_neg_y():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
    # Test with debug = "plot", line position that will trigger -y
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=-1000)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=0)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=2056)
assert pcv.outputs.observations['default']['height_above_reference']['value'] == 713
def test_plantcv_analyze_bound_vertical():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
def test_plantcv_analyze_bound_vertical_grayscale_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
pcv.outputs.clear()
def test_plantcv_analyze_bound_vertical_neg_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug="plot", line position that will trigger -x
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=2454)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 441
def test_plantcv_analyze_bound_vertical_small_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug='plot', line position that will trigger -x, and two channel object
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1)
assert pcv.outputs.observations['default']['width_right_reference']['value'] == 441
def test_plantcv_analyze_color():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
# Test with debug = "print"
# pcv.params.debug = "print"
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
# Test with debug = "plot"
# pcv.params.debug = "plot"
# _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
# _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
# Test with debug = None
# pcv.params.debug = None
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='rgb')
assert pcv.outputs.observations['default']['hue_median']['value'] == 84.0
def test_plantcv_analyze_color_incorrect_image():
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img_binary, mask=mask, hist_plot_type=None)
def test_plantcv_analyze_color_bad_hist_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pcv.params.debug = "plot"
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='bgr')
def test_plantcv_analyze_color_incorrect_hist_plot_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="bgr")
def test_plantcv_analyze_nir():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=img, mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_nir_16bit():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=np.uint16(img), mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_object():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
pcv.outputs.clear()
assert len(obj_images) != 0
def test_plantcv_analyze_object_grayscale_input():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 1
def test_plantcv_analyze_object_zero_slope():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:11, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[11, 10]], [[12, 10]], [[13, 10]], [[14, 10]], [[15, 10]], [[16, 10]],
[[17, 10]], [[18, 10]], [[19, 10]], [[20, 10]], [[21, 10]], [[22, 10]], [[23, 10]],
[[24, 10]], [[25, 10]], [[26, 10]], [[27, 10]], [[28, 10]], [[29, 10]], [[30, 10]],
[[31, 10]], [[32, 10]], [[33, 10]], [[34, 10]], [[35, 10]], [[36, 10]], [[37, 10]],
[[38, 10]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]], [[34, 10]],
[[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]], [[27, 10]],
[[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]], [[20, 10]],
[[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]], [[13, 10]],
[[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2d():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[0:5, 45:49, 0] = 255
img[0:5, 0:5, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[45, 1]], [[45, 2]], [[45, 3]], [[45, 4]], [[46, 4]], [[47, 4]], [[48, 4]],
[[48, 3]], [[48, 2]], [[48, 1]], [[47, 1]], [[46, 1]], [[1, 1]], [[1, 2]],
[[1, 3]], [[1, 4]], [[2, 4]], [[3, 4]], [[4, 4]], [[4, 3]], [[4, 2]],
[[4, 1]], [[3, 1]], [[2, 1]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2e():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:15, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[10, 11]], [[10, 12]], [[10, 13]], [[10, 14]], [[11, 14]], [[12, 14]],
[[13, 14]], [[14, 14]], [[15, 14]], [[16, 14]], [[17, 14]], [[18, 14]], [[19, 14]],
[[20, 14]], [[21, 14]], [[22, 14]], [[23, 14]], [[24, 14]], [[25, 14]], [[26, 14]],
[[27, 14]], [[28, 14]], [[29, 14]], [[30, 14]], [[31, 14]], [[32, 14]], [[33, 14]],
[[34, 14]], [[35, 14]], [[36, 14]], [[37, 14]], [[38, 14]], [[39, 14]], [[39, 13]],
[[39, 12]], [[39, 11]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]],
[[34, 10]], [[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]],
[[27, 10]], [[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]],
[[20, 10]], [[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]],
[[13, 10]], [[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_small_contour():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_contour = [np.array([[[0, 0]], [[0, 50]], [[50, 50]], [[50, 0]]], dtype=np.int32)]
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert obj_images is None
def test_plantcv_analyze_thermal_values():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_thermal_values")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
# img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_THERMAL_IMG_MASK), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_THERMAL_ARRAY), encoding="latin1")
img = contours_npz['arr_0']
pcv.params.debug = None
thermal_hist = pcv.analyze_thermal_values(thermal_array=img, mask=mask, histplot=True)
assert thermal_hist is not None and pcv.outputs.observations['default']['median_temp']['value'] == 33.20922
def test_plantcv_apply_mask_white():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_white")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="white")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_black():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_black")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="black")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
hyper_array = pcv.hyperspectral.read_data(filename=spectral_filename)
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
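    # Stacking four copies of a single-channel array stands in for a multi-band (hyperspectral-like) image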
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img_stacked, mask=img, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
masked_array = pcv.apply_mask(img=hyper_array.array_data, mask=img, mask_color="black")
assert np.mean(masked_array) == 13.97111260224949
def test_plantcv_apply_mask_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="wite")
def test_plantcv_auto_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=(20, 10), padding_y=(20, 10), color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], color='image')
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=2000, padding_y=2000, color='image')
# Test with debug = None
pcv.params.debug = None
cropped = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=20, padding_y=20, color='black')
x, y, z = np.shape(img1)
x1, y1, z1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='white')
x, y = np.shape(gray_img)
x1, y1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_bad_color_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='wite')
def test_plantcv_auto_crop_bad_padding_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x="one", padding_y=20, color='white')
def test_plantcv_canny_edge_detect():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.canny_edge_detect(img=rgb_img, mask=mask, mask_color='white')
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.canny_edge_detect(img=img, thickness=2)
_ = pcv.canny_edge_detect(img=img)
# Test with debug = None
pcv.params.debug = None
edge_img = pcv.canny_edge_detect(img=img)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(edge_img), TEST_BINARY_DIM)):
# Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(edge_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_canny_edge_detect_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color="gray")
def test_plantcv_closing():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_closing")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug=None
pcv.params.debug = None
_ = pcv.closing(gray_img)
# Test with debug='plot'
pcv.params.debug = 'plot'
_ = pcv.closing(bin_img, np.ones((4, 4), np.uint8))
# Test with debug='print'
pcv.params.debug = 'print'
filtered_img = pcv.closing(bin_img)
assert np.sum(filtered_img) == 16261860
def test_plantcv_closing_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
with pytest.raises(RuntimeError):
_ = pcv.closing(rgb_img)
def test_plantcv_cluster_contours():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
obj_hierarchy = hierarchy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, show_grid=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
clusters_i, contours, hierarchy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
    obj_hierarchy = hierarchy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
    clusters_i, contours, hierarchy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_splitimg():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
    obj_hierarchy = hierarchy['arr_0']
# Test with debug = None
pcv.params.debug = None
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file=None, filenames=None)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=[[0]], contours=[],
hierarchy=np.array([[[1, -1, -1, -1]]]))
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file='multi', filenames=None)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None, filenames=cluster_names)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None,
filenames=cluster_names_too_many)
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_cluster_contours_splitimg_grayscale():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg_grayscale")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
    obj_hierarchy = hierarchy['arr_0']
pcv.params.debug = None
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_color_palette():
# Return a color palette
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
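# With params.color_sequence = "random" the palette is presumably returned in shuffled order; the
# test only checks that the requested number of RGB triplets comes back.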
def test_plantcv_color_palette_random():
# Return a color palette in random order
pcv.params.color_sequence = "random"
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
def test_plantcv_color_palette_saved():
# Return a color palette that was saved
pcv.params.saved_color_scale = [[0, 0, 0], [255, 255, 255]]
colors = pcv.color_palette(num=2, saved=True)
assert colors == [[0, 0, 0], [255, 255, 255]]
def test_plantcv_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50)
def test_plantcv_crop_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50, 4)
def test_plantcv_crop_position_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_three_channel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_three_channel, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_color():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE))
mask_non_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="top", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=45, y=2, v_pos="top", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_bad_input_x():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=-1, y=-1, v_pos="top", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_vpos():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="below", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_hpos():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="starboard")
def test_plantcv_dilate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_dilate")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
dilate_img = pcv.dilate(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(dilate_img), TEST_BINARY_DIM)):
# Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(dilate_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_dilate_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.dilate(img, 1, 1)
def test_plantcv_erode():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_erode")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
erode_img = pcv.erode(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(erode_img), TEST_BINARY_DIM)):
# Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(erode_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_erode_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.erode(img, 1, 1)
def test_plantcv_distance_transform():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_distance_transform")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = None
pcv.params.debug = None
distance_transform_img = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(distance_transform_img), np.shape(mask)))
def test_plantcv_fatal_error():
# Verify that the fatal_error function raises a RuntimeError
with pytest.raises(RuntimeError):
pcv.fatal_error("Test error")
def test_plantcv_fill():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill(bin_img=img, size=63632)
# Assert that the output image has the dimensions of the input image
# assert all([i == j] for i, j in zip(np.shape(fill_img), TEST_BINARY_DIM))
assert np.sum(fill_img) == 0
def test_plantcv_fill_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill(bin_img=img, size=1)
def test_plantcv_fill_holes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.fill_holes(bin_img=img)
pcv.params.debug = "plot"
_ = pcv.fill_holes(bin_img=img)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill_holes(bin_img=img)
assert np.sum(fill_img) > np.sum(img)
def test_plantcv_fill_holes_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill_holes(bin_img=img)
def test_plantcv_find_objects():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = None
pcv.params.debug = None
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_find_objects_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_flip():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_flip")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.flip(img=img, direction="horizontal")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.flip(img=img, direction="vertical")
_ = pcv.flip(img=img_binary, direction="vertical")
# Test with debug = None
pcv.params.debug = None
flipped_img = pcv.flip(img=img, direction="horizontal")
    assert all(i == j for i, j in zip(np.shape(flipped_img), TEST_COLOR_DIM))
def test_plantcv_flip_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.flip(img=img, direction="vert")
def test_plantcv_gaussian_blur():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_gaussian_blur")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
_ = pcv.gaussian_blur(img=img_color, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = None
pcv.params.debug = None
gaussian_img = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
imgavg = np.average(img)
gavg = np.average(gaussian_img)
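    # Blurring changes values along object edges, so the mean of the blurred image should differ from the input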
assert gavg != imgavg
def test_plantcv_get_kernel_cross():
kernel = pcv.get_kernel(size=(3, 3), shape="cross")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_rectangle():
kernel = pcv.get_kernel(size=(3, 3), shape="rectangle")
assert (kernel == np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])).all()
def test_plantcv_get_kernel_ellipse():
kernel = pcv.get_kernel(size=(3, 3), shape="ellipse")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_bad_input_size():
with pytest.raises(ValueError):
_ = pcv.get_kernel(size=(1, 1), shape="ellipse")
def test_plantcv_get_kernel_bad_input_shape():
with pytest.raises(RuntimeError):
_ = pcv.get_kernel(size=(3, 1), shape="square")
def test_plantcv_get_nir_sv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR)
assert nirpath == nirpath1
def test_plantcv_get_nir_tv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS_TV)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR_TV)
assert nirpath == nirpath1
def test_plantcv_hist_equalization():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = None
pcv.params.debug = None
hist = pcv.hist_equalization(gray_img=img)
histavg = np.average(hist)
imgavg = np.average(img)
assert histavg != imgavg
def test_plantcv_hist_equalization_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), 1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.hist_equalization(gray_img=img)
def test_plantcv_image_add():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_add")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = None
pcv.params.debug = None
added_img = pcv.image_add(gray_img1=img1, gray_img2=img2)
    assert all(i == j for i, j in zip(np.shape(added_img), TEST_BINARY_DIM))
def test_plantcv_image_fusion():
# Read in test data
# 16-bit image
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN))
# 8-bit image
img2 = img_as_ubyte(img2)
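    # Fuse the 16-bit and 8-bit images; the lists give the wavelengths assumed for each image's band(s)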
fused_img = pcv.image_fusion(img1, img2, [480.0], [550.0, 640.0, 800.0])
assert str(type(fused_img)) == "<class 'plantcv.plantcv.classes.Spectral_data'>"
def test_plantcv_image_fusion_size_diff():
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), 0)
img2 = np.copy(img1)
img2 = img2[0:10, 0:10]
with pytest.raises(RuntimeError):
_ = pcv.image_fusion(img1, img2, [480.0, 550.0, 670.0], [480.0, 550.0, 670.0])
def test_plantcv_image_subtract():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_sub")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = 'print'
_ = pcv.image_subtract(img1, img2)
# Test with debug = "plot"
pcv.params.debug = 'plot'
_ = pcv.image_subtract(img1, img2)
# Test with debug = None
pcv.params.debug = None
new_img = pcv.image_subtract(img1, img2)
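    # Subtracting an image from an identical copy should leave an all-zero result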
assert np.array_equal(new_img, np.zeros(np.shape(new_img), np.uint8))
def test_plantcv_image_subtract_fail():
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
# test
with pytest.raises(RuntimeError):
_ = pcv.image_subtract(img1, img2)
def test_plantcv_invert():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_invert")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.invert(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.invert(gray_img=img)
# Test with debug = None
pcv.params.debug = None
inverted_img = pcv.invert(gray_img=img)
    # Assert that the output image has the dimensions of the input image and is still binary
    assert (all(i == j for i, j in zip(np.shape(inverted_img), TEST_BINARY_DIM)) and
            all(i == j for i, j in zip(np.unique(inverted_img), [0, 255])))
def test_plantcv_landmark_reference_pt_dist():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_landmark_reference")
os.mkdir(cache_dir)
points_rescaled = [(0.0139, 0.2569), (0.2361, 0.2917), (0.3542, 0.3819), (0.3542, 0.4167), (0.375, 0.4236),
(0.7431, 0.3681), (0.8958, 0.3542), (0.9931, 0.3125), (0.1667, 0.5139), (0.4583, 0.8889),
(0.4931, 0.5903), (0.3889, 0.5694), (0.4792, 0.4306), (0.2083, 0.5417), (0.3194, 0.5278),
(0.3889, 0.375), (0.3681, 0.3472), (0.2361, 0.0139), (0.5417, 0.2292), (0.7708, 0.3472),
(0.6458, 0.3472), (0.6389, 0.5208), (0.6458, 0.625)]
centroid_rescaled = (0.4685, 0.4945)
bottomline_rescaled = (0.4685, 0.2569)
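    # Degenerate inputs (empty point lists, non-numeric centroid) are exercised first; only the labeled call is checked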
_ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=('a', 'b'), bline_r=(0, 0))
_ = pcv.landmark_reference_pt_dist(points_r=[(10, 1000)], centroid_r=(10, 10), bline_r=(10, 10))
_ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=(0, 0), bline_r=(0, 0))
_ = pcv.landmark_reference_pt_dist(points_r=points_rescaled, centroid_r=centroid_rescaled,
bline_r=bottomline_rescaled, label="prefix")
assert len(pcv.outputs.observations['prefix'].keys()) == 8
def test_plantcv_laplace_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_laplace_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Test with debug = None
pcv.params.debug = None
lp_img = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(lp_img), TEST_GRAY_DIM))
def test_plantcv_logical_and():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_and")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
and_img = pcv.logical_and(bin_img1=img1, bin_img2=img2)
    assert all(i == j for i, j in zip(np.shape(and_img), TEST_BINARY_DIM))
def test_plantcv_logical_or():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_or")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
or_img = pcv.logical_or(bin_img1=img1, bin_img2=img2)
    assert all(i == j for i, j in zip(np.shape(or_img), TEST_BINARY_DIM))
def test_plantcv_logical_xor():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_xor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
xor_img = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
    assert all(i == j for i, j in zip(np.shape(xor_img), TEST_BINARY_DIM))
def test_plantcv_median_blur():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.median_blur(gray_img=img, ksize=5)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.median_blur(gray_img=img, ksize=5)
# Test with debug = None
pcv.params.debug = None
blur_img = pcv.median_blur(gray_img=img, ksize=5)
    # Assert that the output image has the dimensions of the input image and is still binary
    assert (all(i == j for i, j in zip(np.shape(blur_img), TEST_BINARY_DIM)) and
            all(i == j for i, j in zip(np.unique(blur_img), [0, 255])))
def test_plantcv_median_blur_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.median_blur(img, 5.)
def test_plantcv_naive_bayes_classifier():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_naive_bayes_classifier")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Test with debug = None
pcv.params.debug = None
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Assert that the output image has the dimensions of the input image
if all([i == j] for i, j in zip(np.shape(mask), TEST_GRAY_DIM)):
# Assert that the image is binary
if all([i == j] for i, j in zip(np.unique(mask), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_naive_bayes_classifier_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS_BAD))
def test_plantcv_object_composition():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
_ = pcv.object_composition(img=img, contours=[], hierarchy=object_hierarchy)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Test with debug = None
pcv.params.debug = None
contours, mask = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Assert that the objects have been combined
contour_shape = np.shape(contours) # type: tuple
assert contour_shape[1] == 1
def test_plantcv_object_composition_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "plot"
pcv.params.debug = "plot"
contours, mask = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Assert that the objects have been combined
contour_shape = np.shape(contours) # type: tuple
assert contour_shape[1] == 1
def test_plantcv_within_frame():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask_ib = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_oob = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_OOB), -1)
in_bounds_ib = pcv.within_frame(mask=mask_ib, border_width=1, label="prefix")
in_bounds_oob = pcv.within_frame(mask=mask_oob, border_width=1)
assert (in_bounds_ib is True and in_bounds_oob is False)
def test_plantcv_within_frame_bad_input():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
grayscale_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
with pytest.raises(RuntimeError):
_ = pcv.within_frame(grayscale_img)
def test_plantcv_opening():
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_opening")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug=None
pcv.params.debug = None
_ = pcv.opening(gray_img)
# Test with debug='plot'
pcv.params.debug = 'plot'
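    # An optional structuring element (here a 4x4 array of ones) can be supplied to opening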
_ = pcv.opening(bin_img, np.ones((4, 4), np.uint8))
# Test with debug='print'
pcv.params.debug = 'print'
filtered_img = pcv.opening(bin_img)
assert np.sum(filtered_img) == 16184595
def test_plantcv_opening_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
with pytest.raises(RuntimeError):
_ = pcv.opening(rgb_img)
def test_plantcv_output_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=None, mask_only=False)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=False)
_ = pcv.output_mask(img=img_color, mask=mask, filename='test.png', outdir=None, mask_only=False)
    # Remove tmp files in the working directory
shutil.rmtree("ori-images")
shutil.rmtree("mask-images")
# Test with debug = None
pcv.params.debug = None
imgpath, maskpath, analysis_images = pcv.output_mask(img=img, mask=mask, filename='test.png',
outdir=cache_dir, mask_only=False)
assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_output_mask_true():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask_true")
pcv.params.debug_outdir = cache_dir
os.mkdir(cache_dir)
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.output_mask(img=img_color, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True)
pcv.params.debug = None
imgpath, maskpath, analysis_images = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir,
mask_only=False)
assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_plot_image_matplotlib_input():
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_image_matplotlib_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pimg = pcv.visualize.pseudocolor(gray_img=img, mask=mask, min_value=10, max_value=200)
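    # pseudocolor returns a matplotlib figure, which plot_image is expected to reject with a RuntimeError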
with pytest.raises(RuntimeError):
pcv.plot_image(pimg)
def test_plantcv_plot_image_plotnine():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_image_plotnine")
os.mkdir(cache_dir)
dataset = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
img = ggplot(data=dataset)
try:
pcv.plot_image(img=img)
except RuntimeError:
assert False
# Assert that the image was plotted without error
assert True
def test_plantcv_print_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=img, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_image_bad_type():
with pytest.raises(RuntimeError):
pcv.print_image(img=[], filename="/dev/null")
def test_plantcv_print_image_plotnine():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image_plotnine")
os.mkdir(cache_dir)
dataset = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
img = ggplot(data=dataset)
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=img, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_image_matplotlib():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image_matplotlib")
os.mkdir(cache_dir)
# Input data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
plt.figure()
plt.imshow(img)
plot = plt.gcf()
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=plot, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_results(tmpdir):
# Create a tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.json")
pcv.print_results(filename=outfile)
assert os.path.exists(outfile)
def test_plantcv_readimage_native():
# Test with debug = None
pcv.params.debug = None
_ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='rgba')
_ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
    # Assert that the returned image name, path, and dimensions match the input image
    assert (img_name == TEST_INPUT_COLOR and path == TEST_DATA and
            all(i == j for i, j in zip(np.shape(img), TEST_COLOR_DIM)))
def test_plantcv_readimage_grayscale():
# Test with debug = None
pcv.params.debug = None
_, _, _ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="grey")
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="gray")
assert len(np.shape(img)) == 2
def test_plantcv_readimage_rgb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="rgb")
assert len(np.shape(img)) == 3
def test_plantcv_readimage_rgba_as_rgb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_RGBA), mode="native")
assert np.shape(img)[2] == 3
def test_plantcv_readimage_csv():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_THERMAL_CSV), mode="csv")
assert len(np.shape(img)) == 2
def test_plantcv_readimage_envi():
# Test with debug = None
pcv.params.debug = None
array_data = pcv.readimage(filename=os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA), mode="envi")
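    # The version guard below means no assertion runs on Python 3, so this is effectively a smoke test of the ENVI reader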
if sys.version_info[0] < 3:
assert len(array_data.array_type) == 8
def test_plantcv_readimage_bad_file():
with pytest.raises(RuntimeError):
_ = pcv.readimage(filename=TEST_INPUT_COLOR)
def test_plantcv_readbayer_default_bg():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_readbayer_default_bg")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="default")
# Test with debug = "plot"
pcv.params.debug = "plot"
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_bg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_bg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_bad_input():
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_, _, _ = pcv.readbayer(filename=os.path.join(TEST_DATA, "no-image.png"), bayerpattern="GR", alg="default")
def test_plantcv_rectangle_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="white")
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="white")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rectangle_mask(img=img_color, p1=(0, 0), p2=(2454, 2056), color="gray")
# Test with debug = None
pcv.params.debug = None
masked, hist, contour, heir = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="black")
maskedsum = np.sum(masked)
imgsum = np.sum(img)
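    # Masking with a black rectangle should lower the total pixel sum relative to the original image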
assert maskedsum < imgsum
def test_plantcv_rectangle_mask_bad_input():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="whit")
def test_plantcv_report_size_marker_detect():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_report_size_marker_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120)
pcv.outputs.clear()
assert len(images) != 0
def test_plantcv_report_size_marker_define():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
objcolor='light', thresh_channel='s', thresh=120)
assert len(images) != 0
def test_plantcv_report_size_marker_grayscale_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# ROI contour
roi_contour = [np.array([[[0, 0]], [[0, 49]], [[49, 49]], [[49, 0]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
objcolor='light', thresh_channel='s', thresh=120)
assert len(images) != 0
def test_plantcv_report_size_marker_bad_marker_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
with pytest.raises(RuntimeError):
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='none',
objcolor='light', thresh_channel='s', thresh=120)
def test_plantcv_report_size_marker_bad_threshold_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
with pytest.raises(RuntimeError):
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel=None, thresh=120)
def test_plantcv_rgb2gray_cmyk():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
c = pcv.rgb2gray_cmyk(rgb_img=img, channel="c")
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(c), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_cmyk_bad_channel():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
# Channel S is not in CMYK
_ = pcv.rgb2gray_cmyk(rgb_img=img, channel="s")
def test_plantcv_rgb2gray_hsv():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_hsv")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Test with debug = None
pcv.params.debug = None
s = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(s), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_hsv_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="l")
def test_plantcv_rgb2gray_lab():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_lab")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Test with debug = None
pcv.params.debug = None
b = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(b), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_lab_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rgb2gray_lab(rgb_img=img, channel="v")
def test_plantcv_rgb2gray():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = None
pcv.params.debug = None
gray = pcv.rgb2gray(rgb_img=img)
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(gray), TEST_GRAY_DIM))
def test_plantcv_roi2mask():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi2mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "plot"
_ = pcv.roi.roi2mask(img=img, contour=obj_contour)
pcv.params.debug = "print"
mask = pcv.roi.roi2mask(img=img, contour=obj_contour)
assert np.shape(mask)[0:2] == np.shape(img)[0:2] and np.sum(mask) == 255
def test_plantcv_roi_objects():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="largest")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="partial")
# Test with debug = None and roi_type = cutto
pcv.params.debug = None
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="cutto")
# Test with debug = None
kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_contour=roi_contour,
roi_hierarchy=roi_hierarchy,
object_contour=object_contours,
obj_hierarchy=object_hierarchy, roi_type="partial")
# Assert that the contours were filtered as expected
assert len(kept_contours) == 1891
def test_plantcv_roi_objects_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.roi_objects(img=img, roi_type="cut", roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy)
def test_plantcv_roi_objects_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "plot"
pcv.params.debug = "plot"
kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_type="partial", roi_contour=roi_contour,
roi_hierarchy=roi_hierarchy,
object_contour=object_contours,
obj_hierarchy=object_hierarchy)
# Assert that the contours were filtered as expected
assert len(kept_contours) == 1891
def test_plantcv_rotate():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
rotated = pcv.rotate(img=img, rotation_deg=45, crop=True)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_transform_rotate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rotate_img")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
# Test with debug = None
pcv.params.debug = None
rotated = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_transform_rotate_gray():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=False)
# Test with debug = None
pcv.params.debug = None
rotated = pcv.transform.rotate(img=img, rotation_deg=45, crop=False)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_scale_features():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scale_features")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position='NA')
# Test with debug = None
pcv.params.debug = None
points_rescaled, centroid_rescaled, bottomline_rescaled = pcv.scale_features(obj=obj_contour, mask=mask,
points=TEST_ACUTE_RESULT,
line_position=50)
assert len(points_rescaled) == 23
def test_plantcv_scale_features_bad_input():
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
assert all([i == j] for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_scharr_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scharr_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "print"
# Test with debug = "print"
_ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Test with debug = None
pcv.params.debug = None
scharr_img = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(scharr_img), TEST_GRAY_DIM))
def test_plantcv_shift_img():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_shift_img")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.shift_img(img=img, number=300, side="top")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="top")
    # Test the remaining shift directions
    _ = pcv.shift_img(img=img, number=300, side="bottom")
    _ = pcv.shift_img(img=img, number=300, side="right")
    _ = pcv.shift_img(img=mask, number=300, side="left")
# Test with debug = None
pcv.params.debug = None
rotated = pcv.shift_img(img=img, number=300, side="top")
imgavg = np.average(img)
shiftavg = np.average(rotated)
assert shiftavg != imgavg
def test_plantcv_shift_img_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.shift_img(img=img, number=-300, side="top")
def test_plantcv_shift_img_bad_side_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.shift_img(img=img, number=300, side="starboard")
def test_plantcv_sobel_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_sobel_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Test with debug = None
pcv.params.debug = None
sobel_img = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(sobel_img), TEST_GRAY_DIM))
def test_plantcv_stdev_filter():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_stdev_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
pcv.params.debug = "plot"
_ = pcv.stdev_filter(img=img, ksize=11)
pcv.params.debug = "print"
filter_img = pcv.stdev_filter(img=img, ksize=11)
assert (np.shape(filter_img) == np.shape(img))
def test_plantcv_watershed_segmentation():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_watershed_segmentation")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10)
# Test with debug = None
pcv.params.debug = None
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10)
assert pcv.outputs.observations['default']['estimated_object_count']['value'] > 9
def test_plantcv_white_balance_gray_16bit():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_16bit")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
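    # White balancing rescales intensities, so the corrected image mean should differ from the original mean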
assert balancedavg != imgavg
def test_plantcv_white_balance_gray_8bit():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_8bit")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_rgb():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_rgb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = None
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 5, 5, 5))
def test_plantcv_white_balance_bad_mode_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
# Test with debug = None
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='histogram', roi=(5, 5, 80, 80))
def test_plantcv_white_balance_bad_input_int():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = None
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='hist', roi=(5., 5, 5, 5))
def test_plantcv_x_axis_pseudolandmarks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_x_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "print"
_ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
_ = pcv.x_axis_pseudolandmarks(obj=np.array([[0, 0], [0, 0]]), mask=np.array([[0, 0], [0, 0]]), img=img)
_ = pcv.x_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
_ = pcv.x_axis_pseudolandmarks(obj=(), mask=mask, img=img)
# Test with debug = None
pcv.params.debug = None
top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all([all(i == j for i, j in zip(np.shape(top), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(bottom), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_v), (20, 1, 2)))])
def test_plantcv_x_axis_pseudolandmarks_small_obj():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_, _, _ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    assert all([all(i == j for i, j in zip(np.shape(top), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(bottom), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_v), (20, 1, 2)))])
def test_plantcv_x_axis_pseudolandmarks_bad_input():
img = np.array([])
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
assert all([i == j] for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_x_axis_pseudolandmarks_bad_obj_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
with pytest.raises(RuntimeError):
_ = pcv.x_axis_pseudolandmarks(obj=np.array([[-2, -2], [-2, -2]]), mask=np.array([[-2, -2], [-2, -2]]), img=img)
def test_plantcv_y_axis_pseudolandmarks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "print"
_ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
_ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_ = pcv.y_axis_pseudolandmarks(obj=(), mask=mask, img=img)
_ = pcv.y_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
_ = pcv.y_axis_pseudolandmarks(obj=np.array(([[21, 11]], [[159, 155]], [[237, 11]])),
mask=np.array(([[38, 54]], [[144, 169]], [[81, 137]])), img=img)
# Test with debug = None
pcv.params.debug = None
left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all([all(i == j for i, j in zip(np.shape(left), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(right), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_h), (20, 1, 2)))])
def test_plantcv_y_axis_pseudolandmarks_small_obj():
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_small_obj")
os.mkdir(cache_dir)
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_, _, _ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
pcv.outputs.clear()
left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all([all(i == j for i, j in zip(np.shape(left), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(right), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_h), (20, 1, 2)))])
def test_plantcv_y_axis_pseudolandmarks_bad_input():
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_bad_input")
os.mkdir(cache_dir)
img = np.array([])
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
assert all([i == j] for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_y_axis_pseudolandmarks_bad_obj_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
with pytest.raises(RuntimeError):
_ = pcv.y_axis_pseudolandmarks(obj=np.array([[-2, -2], [-2, -2]]), mask=np.array([[-2, -2], [-2, -2]]), img=img)
def test_plantcv_background_subtraction():
# List to hold result of all tests.
truths = []
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
big_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Test that background subtraction is actually working.
    # It should return a mask whose sum is greater than zero.
pcv.params.debug = None
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
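    # Subtraction should also work when the background and foreground images differ in size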
fgmask = pcv.background_subtraction(background_image=big_img, foreground_image=bg_img)
truths.append(np.sum(fgmask) > 0)
# The same foreground subtracted from itself should be 0
fgmask = pcv.background_subtraction(background_image=fg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) == 0)
# The same background subtracted from itself should be 0
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=bg_img)
truths.append(np.sum(fgmask) == 0)
# All of these should be true for the function to pass testing.
assert (all(truths))
def test_plantcv_background_subtraction_debug():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_background_subtraction_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# List to hold result of all tests.
truths = []
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
# Test with debug = "print"
pcv.params.debug = "print"
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
# Test with debug = "plot"
pcv.params.debug = "plot"
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
# All of these should be true for the function to pass testing.
assert (all(truths))
def test_plantcv_background_subtraction_bad_img_type():
fg_color = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_gray = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND), 0)
pcv.params.debug = None
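    # A grayscale background with an RGB foreground should raise a RuntimeError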
with pytest.raises(RuntimeError):
_ = pcv.background_subtraction(background_image=bg_gray, foreground_image=fg_color)
def test_plantcv_background_subtraction_different_sizes():
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
    bg_shp = np.shape(bg_img)  # type: tuple
    # cv2.resize expects (width, height); np.shape returns (height, width, channels)
    bg_img_resized = cv2.resize(bg_img, (int(bg_shp[1] / 2), int(bg_shp[0] / 2)), interpolation=cv2.INTER_AREA)
pcv.params.debug = None
fgmask = pcv.background_subtraction(background_image=bg_img_resized, foreground_image=fg_img)
assert np.sum(fgmask) > 0
def test_plantcv_spatial_clustering_dbscan():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_dbscan")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = "print"
_ = pcv.spatial_clustering(img, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
pcv.params.debug = "plot"
spmask = pcv.spatial_clustering(img, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
assert len(spmask[1]) == 2
def test_plantcv_spatial_clustering_optics():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_optics")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = None
spmask = pcv.spatial_clustering(img, algorithm="OPTICS", min_cluster_size=100, max_distance=5000)
assert len(spmask[1]) == 2
def test_plantcv_spatial_clustering_badinput():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = None
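    # An unsupported clustering algorithm name should raise a NameError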
with pytest.raises(NameError):
_ = pcv.spatial_clustering(img, algorithm="Hydra", min_cluster_size=5, max_distance=100)
# ##############################
# Tests for the learn subpackage
# ##############################
def test_plantcv_learn_naive_bayes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
maskdir = os.path.join(cache_dir, "masks")
if not os.path.exists(imgdir):
os.mkdir(imgdir)
if not os.path.exists(maskdir):
os.mkdir(maskdir)
    # Copy an image and mask to the image/mask directories
shutil.copyfile(os.path.join(TEST_DATA, TEST_VIS_SMALL), os.path.join(imgdir, "image.png"))
shutil.copyfile(os.path.join(TEST_DATA, TEST_MASK_SMALL), os.path.join(maskdir, "image.png"))
# Run the naive Bayes training module
outfile = os.path.join(cache_dir, "naive_bayes_pdfs.txt")
plantcv.learn.naive_bayes(imgdir=imgdir, maskdir=maskdir, outfile=outfile, mkplots=True)
assert os.path.exists(outfile)
def test_plantcv_learn_naive_bayes_multiclass():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes_multiclass")
os.mkdir(cache_dir)
# Run the naive Bayes multiclass training module
outfile = os.path.join(cache_dir, "naive_bayes_multiclass_pdfs.txt")
plantcv.learn.naive_bayes_multiclass(samples_file=os.path.join(TEST_DATA, TEST_SAMPLED_RGB_POINTS), outfile=outfile,
mkplots=True)
assert os.path.exists(outfile)
# ####################################
# Tests for the morphology subpackage
# ####################################
def test_plantcv_morphology_segment_curvature():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_curvature")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
pcv.outputs.clear()
_ = pcv.morphology.segment_curvature(segmented_img, seg_objects, label="prefix")
pcv.params.debug = "plot"
pcv.outputs.clear()
_ = pcv.morphology.segment_curvature(segmented_img, seg_objects)
assert len(pcv.outputs.observations['default']['segment_curvature']['value']) == 22
def test_plantcv_morphology_check_cycles():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pcv.params.debug = "print"
_ = pcv.morphology.check_cycles(mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.check_cycles(mask)
pcv.params.debug = None
_ = pcv.morphology.check_cycles(mask)
assert pcv.outputs.observations['default']['num_cycles']['value'] == 1
def test_plantcv_morphology_find_branch_pts():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.find_branch_pts(skel_img=skeleton, mask=mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.find_branch_pts(skel_img=skeleton)
pcv.params.debug = None
branches = pcv.morphology.find_branch_pts(skel_img=skeleton)
assert np.sum(branches) == 9435
def test_plantcv_morphology_find_tips():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_tips")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.find_tips(skel_img=skeleton, mask=mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.find_tips(skel_img=skeleton)
pcv.params.debug = None
tips = pcv.morphology.find_tips(skel_img=skeleton)
assert np.sum(tips) == 9435
def test_plantcv_morphology_prune():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.prune(skel_img=skeleton, size=1)
pcv.params.debug = "plot"
_ = pcv.morphology.prune(skel_img=skeleton, size=1, mask=skeleton)
pcv.params.debug = None
pruned_img, _, _ = pcv.morphology.prune(skel_img=skeleton, size=3)
assert np.sum(pruned_img) < np.sum(skeleton)
def test_plantcv_morphology_prune_size0():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned_img, _, _ = pcv.morphology.prune(skel_img=skeleton, size=0)
assert np.sum(pruned_img) == np.sum(skeleton)
def test_plantcv_morphology_iterative_prune():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned_img = pcv.morphology._iterative_prune(skel_img=skeleton, size=3)
assert np.sum(pruned_img) < np.sum(skeleton)
def test_plantcv_morphology_segment_skeleton():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_skeleton")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.segment_skeleton(skel_img=skeleton, mask=mask)
pcv.params.debug = "plot"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
assert len(segment_objects) == 73
def test_plantcv_morphology_fill_segments():
# Clear previous outputs
pcv.outputs.clear()
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_dic = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
obj = []
for key, val in obj_dic.items():
obj.append(val)
pcv.params.debug = None
_ = pcv.morphology.fill_segments(mask, obj)
tests = [pcv.outputs.observations['default']['segment_area']['value'][42] == 5529,
pcv.outputs.observations['default']['segment_area']['value'][20] == 5057,
pcv.outputs.observations['default']['segment_area']['value'][49] == 3323]
assert all(tests)
def test_plantcv_morphology_fill_segments_with_stem():
# Clear previous outputs
pcv.outputs.clear()
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_dic = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
obj = []
for key, val in obj_dic.items():
obj.append(val)
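    # Treat the first few segments as the stem objects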
stem_obj = obj[0:4]
pcv.params.debug = None
_ = pcv.morphology.fill_segments(mask, obj, stem_obj)
num_objects = len(pcv.outputs.observations['default']['leaf_area']['value'])
assert num_objects == 69
def test_plantcv_morphology_segment_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_angle(segmented_img=segmented_img, objects=segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_angle(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 22
def test_plantcv_morphology_segment_angle_overflow():
# Clear previous outputs
pcv.outputs.clear()
    # Skip pruning; without the extra guard (if statement) in segment_angle this would normally cause an overflow error
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_angle(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 73
def test_plantcv_morphology_segment_euclidean_length():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_eu_length")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_eu_length']['value']) == 22
def test_plantcv_morphology_segment_euclidean_length_bad_input():
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skel = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = None
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skel)
with pytest.raises(RuntimeError):
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects)
def test_plantcv_morphology_segment_path_length():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_path_length")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_path_length(segmented_img, segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_path_length(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_path_length']['value']) == 22
def test_plantcv_morphology_skeletonize():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_skeletonize")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
input_skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = "plot"
_ = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = None
skeleton = pcv.morphology.skeletonize(mask=mask)
arr = np.array(skeleton == input_skeleton)
assert arr.all()
def test_plantcv_morphology_segment_sort():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_sort")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
pcv.params.debug = "print"
_ = pcv.morphology.segment_sort(skeleton, seg_objects, mask=skeleton)
pcv.params.debug = "plot"
leaf_obj, stem_obj = pcv.morphology.segment_sort(skeleton, seg_objects)
assert len(leaf_obj) == 36
def test_plantcv_morphology_segment_tangent_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_tangent_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
objs = [objects[arr_n] for arr_n in objects]
pcv.params.debug = "print"
_ = pcv.morphology.segment_tangent_angle(skel, objs, 2, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_tangent_angle(skel, objs, 2)
assert len(pcv.outputs.observations['default']['segment_tangent_angle']['value']) == 73
def test_plantcv_morphology_segment_id():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_tangent_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
objs = [objects[arr_n] for arr_n in objects]
pcv.params.debug = "print"
_ = pcv.morphology.segment_id(skel, objs)
pcv.params.debug = "plot"
_, labeled_img = pcv.morphology.segment_id(skel, objs, mask=skel)
assert np.sum(labeled_img) > np.sum(skel)
def test_plantcv_morphology_segment_insertion_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
pcv.params.debug = "plot"
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 3, label="prefix")
pcv.params.debug = "print"
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
assert pcv.outputs.observations['default']['segment_insertion_angle']['value'][:6] == ['NA', 'NA', 'NA',
24.956918822001636,
50.7313343343401,
56.427712102130734]
def test_plantcv_morphology_segment_insertion_angle_bad_stem():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
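    # Deliberately replace the stem with leaf segments to trigger a RuntimeError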
stem_obj = [leaf_obj[0], leaf_obj[10]]
with pytest.raises(RuntimeError):
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
def test_plantcv_morphology_segment_combine():
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "plot"
# Test with list of IDs input
_, new_objects = pcv.morphology.segment_combine([0, 1], seg_objects, skel)
assert len(new_objects) + 1 == len(seg_objects)
def test_plantcv_morphology_segment_combine_lists():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "print"
# Test with list of lists input
_, new_objects = pcv.morphology.segment_combine([[0, 1, 2], [3, 4]], seg_objects, skel)
assert len(new_objects) + 3 == len(seg_objects)
def test_plantcv_morphology_segment_combine_bad_input():
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "plot"
with pytest.raises(RuntimeError):
_, new_objects = pcv.morphology.segment_combine([0.5, 1.5], seg_objects, skel)
def test_plantcv_morphology_analyze_stem():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_analyze_stem")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, segmented_img, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
pcv.params.debug = "plot"
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj, label="prefix")
pcv.params.debug = "print"
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
assert pcv.outputs.observations['default']['stem_angle']['value'] == -12.531776428222656
def test_plantcv_morphology_analyze_stem_bad_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
_, _ = pcv.morphology.segment_sort(pruned, seg_objects)
    # Hard-coded vertical stem segment used to force an extreme stem_angle value
    stem_obj = [[[[1116, 1728]], [[1116, 1]]]]
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
assert pcv.outputs.observations['default']['stem_angle']['value'] == 22877334.0
# ########################################
# Tests for the hyperspectral subpackage
# ########################################
def test_plantcv_hyperspectral_read_data_default():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_read_data_default")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
_ = pcv.hyperspectral.read_data(filename=spectral_filename)
pcv.params.debug = "print"
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_no_default_bands():
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_NO_DEFAULT)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_approx_pseudorgb():
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_APPROX_PSEUDO)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_bad_interleave():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_BAD_INTERLEAVE)
with pytest.raises(RuntimeError):
_ = pcv.hyperspectral.read_data(filename=spectral_filename)
def test_plantcv_spectral_index_ndvi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ndvi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ndvi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ndvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_gdvi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_gdvi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_gdvi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.gdvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_savi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_savi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_savi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.savi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ci_rededge():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ci_rededge")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ci_rededge_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ci_rededge(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri550():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri550")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri550_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.cri550(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri700():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri700")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri700_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.cri700(hsi=index_array, distance=20)
def test_plantcv_spectral_index_egi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_egi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
index_array = pcv.spectral_index.egi(rgb_img=rgb_img)
assert np.shape(index_array.array_data) == (2056, 2454) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_evi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.evi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mcari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mcari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mcari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mcari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mcari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mcari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mtci():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mtci")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mtci(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mtci_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mtci(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mtci(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ndre():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ndre")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndre(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ndre_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndre(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ndre(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_chla():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_chla")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chla(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_chla_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chla(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psnd_chla(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_chlb():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_chlb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chlb(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_chlb_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chlb(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psnd_chlb(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_car():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_car")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_car(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_car_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_car(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psnd_car(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_chla():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_chla")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chla(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_chla_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chla(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pssr_chla(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_chlb():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_chlb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chlb(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_chlb_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chlb(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pssr_chlb(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_car():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_car")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_car(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_car_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_car(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pssr_car(hsi=index_array, distance=20)
def test_plantcv_spectral_index_rgri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_rgri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rgri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_rgri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rgri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.rgri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_rvsi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_rvsi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rvsi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_rvsi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rvsi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.rvsi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_sipi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_sipi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sipi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_sipi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sipi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.sipi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_sr():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_sr")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sr(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_sr_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sr(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.sr(hsi=index_array, distance=20)
def test_plantcv_spectral_index_vari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_vari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_vari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.vari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_vi_green():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_vi_green")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vi_green(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_vi_green_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vi_green(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.vi_green(hsi=index_array, distance=20)
def test_plantcv_spectral_index_wi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_wi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.wi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_wi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.wi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.wi(hsi=index_array, distance=20)
def test_plantcv_hyperspectral_analyze_spectral():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_spectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
mask = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
# pcv.params.debug = "plot"
# _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True)
# pcv.params.debug = "print"
# _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True, label="prefix")
pcv.params.debug = None
_ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True, label="prefix")
assert len(pcv.outputs.observations['prefix']['spectral_frequencies']['value']) == 978
def test_plantcv_hyperspectral_analyze_index():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
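    # Create a white mask covering the entire index image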
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
# pcv.params.debug = "print"
# pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
# pcv.params.debug = "plot"
# pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_set_range():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_set_range")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True, min_bin=0, max_bin=1)
assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_auto_range():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_auto_range")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin="auto", max_bin="auto")
assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_outside_range_warning():
import io
from contextlib import redirect_stdout
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_auto_range")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
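    # Capture stdout so the warning printed for out-of-range bins can be checked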
f = io.StringIO()
with redirect_stdout(f):
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin=.5, max_bin=.55, label="i")
out = f.getvalue()
# assert os.listdir(cache_dir) is 0
assert out[0:10] == 'WARNING!!!'
def test_plantcv_hyperspectral_analyze_index_bad_input_mask():
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK))
with pytest.raises(RuntimeError):
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img)
def test_plantcv_hyperspectral_analyze_index_bad_input_index():
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
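    # Overwrite the index data with a three-channel image to trigger a RuntimeError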
index_array.array_data = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK))
with pytest.raises(RuntimeError):
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img)
def test_plantcv_hyperspectral_analyze_index_bad_input_datatype():
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
with pytest.raises(RuntimeError):
pcv.hyperspectral.analyze_index(index_array=array_data, mask=mask_img)
def test_plantcv_hyperspectral_calibrate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_calibrate")
os.mkdir(cache_dir)
raw = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
white = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_WHITE)
dark = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DARK)
raw = pcv.hyperspectral.read_data(filename=raw)
white = pcv.hyperspectral.read_data(filename=white)
dark = pcv.hyperspectral.read_data(filename=dark)
pcv.params.debug = "plot"
_ = pcv.hyperspectral.calibrate(raw_data=raw, white_reference=white, dark_reference=dark)
pcv.params.debug = "print"
calibrated = pcv.hyperspectral.calibrate(raw_data=raw, white_reference=white, dark_reference=dark)
assert np.shape(calibrated.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_extract_wavelength():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_extract_wavelength")
os.mkdir(cache_dir)
spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
spectral = pcv.hyperspectral.read_data(filename=spectral)
pcv.params.debug = "plot"
_ = pcv.hyperspectral.extract_wavelength(spectral_data=spectral, wavelength=500)
pcv.params.debug = "print"
new = pcv.hyperspectral.extract_wavelength(spectral_data=spectral, wavelength=500)
assert np.shape(new.array_data) == (1, 1600)
def test_plantcv_hyperspectral_avg_reflectance():
spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
spectral = pcv.hyperspectral.read_data(filename=spectral)
avg_reflect = pcv.hyperspectral._avg_reflectance(spectral, mask=mask_img)
assert len(avg_reflect) == 978
def test_plantcv_hyperspectral_inverse_covariance():
spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
spectral = pcv.hyperspectral.read_data(filename=spectral)
inv_cov = pcv.hyperspectral._inverse_covariance(spectral)
assert np.shape(inv_cov) == (978, 978)
# ########################################
# Tests for the photosynthesis subpackage
# ########################################
def test_plantcv_photosynthesis_read_dat():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_photosynthesis_read_dat")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = "plot"
fluor_filename = os.path.join(FLUOR_TEST_DATA, FLUOR_IMG)
_, _, _ = pcv.photosynthesis.read_cropreporter(filename=fluor_filename)
pcv.params.debug = "print"
fdark, fmin, fmax = pcv.photosynthesis.read_cropreporter(filename=fluor_filename)
assert np.sum(fmin) < np.sum(fmax)
def test_plantcv_photosynthesis_analyze_fvfm():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# filename = os.path.join(cache_dir, 'plantcv_fvfm_hist.png')
# Read in test data
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
fvfm_images = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
assert len(fvfm_images) != 0
def test_plantcv_photosynthesis_analyze_fvfm_print_analysis_results():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
result_file = os.path.join(cache_dir, "results.txt")
pcv.print_results(result_file)
pcv.outputs.clear()
assert os.path.exists(result_file)
def test_plantcv_photosynthesis_analyze_fvfm_bad_fdark():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark + 3000, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
check = pcv.outputs.observations['default']['fdark_passed_qc']['value'] is False
assert check
def test_plantcv_photosynthesis_analyze_fvfm_bad_input():
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
with pytest.raises(RuntimeError):
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
# ##############################
# Tests for the roi subpackage
# ##############################
def test_plantcv_roi_from_binary_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_from_binary_image")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Create a binary image
bin_img = np.zeros(np.shape(rgb_img)[0:2], dtype=np.uint8)
cv2.rectangle(bin_img, (100, 100), (1000, 1000), 255, -1)
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 3600, 1, 2)
def test_plantcv_roi_from_binary_image_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Create a binary image
bin_img = np.zeros(np.shape(gray_img)[0:2], dtype=np.uint8)
cv2.rectangle(bin_img, (100, 100), (1000, 1000), 255, -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.from_binary_image(bin_img=bin_img, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 3600, 1, 2)
def test_plantcv_roi_from_binary_image_bad_binary_input():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Binary input is required but an RGB input is provided
with pytest.raises(RuntimeError):
_, _ = pcv.roi.from_binary_image(bin_img=rgb_img, img=rgb_img)
def test_plantcv_roi_rectangle():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_rectangle")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 4, 1, 2)
def test_plantcv_roi_rectangle_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 4, 1, 2)
def test_plantcv_roi_rectangle_out_of_frame():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# The resulting rectangle needs to be within the dimensions of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=3000, img=rgb_img)
def test_plantcv_roi_circle():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_circle")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.circle(x=100, y=100, r=50, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.circle(x=100, y=100, r=50, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.circle(x=200, y=225, r=75, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 424, 1, 2)
def test_plantcv_roi_circle_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.circle(x=200, y=225, r=75, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 424, 1, 2)
def test_plantcv_roi_circle_out_of_frame():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # The resulting circle needs to be within the dimensions of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.circle(x=50, y=225, r=75, img=rgb_img)
def test_plantcv_roi_ellipse():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_ellipse")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 360, 1, 2)
def test_plantcv_roi_ellipse_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 360, 1, 2)
def test_plantcv_roi_ellipse_out_of_frame():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # The resulting ellipse needs to be within the dimensions of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.ellipse(x=50, y=225, r1=75, r2=50, angle=0, img=rgb_img)
def test_plantcv_roi_multi():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.roi.multi(rgb_img, coord=[(25, 120), (100, 100)], radius=20)
# Test with debug = None
pcv.params.debug = None
rois1, roi_hierarchy1 = pcv.roi.multi(rgb_img, coord=(25, 120), radius=20, spacing=(10, 10), nrows=3, ncols=6)
    # Assert that the 3x6 grid produced 18 ROI contours
assert len(rois1) == 18
def test_plantcv_roi_multi_bad_input():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# The user must input a list of custom coordinates OR inputs to make a grid. Not both
with pytest.raises(RuntimeError):
_, _ = pcv.roi.multi(rgb_img, coord=[(25, 120), (100, 100)], radius=20, spacing=(10, 10), nrows=3, ncols=6)
def test_plantcv_roi_multi_bad_input_oob():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Grid inputs that would place ROIs outside the image should raise an error
with pytest.raises(RuntimeError):
_, _ = pcv.roi.multi(rgb_img, coord=(25000, 12000), radius=2, spacing=(1, 1), nrows=3, ncols=6)
def test_plantcv_roi_multi_bad_input_oob_list():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # All ROI centers in the list must produce ROIs that fall inside the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.multi(rgb_img, coord=[(25000, 25000), (25000, 12000), (12000, 12000)], radius=20)
def test_plantcv_roi_custom():
# Read in test RGB image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = "plot"
cnt, hier = pcv.roi.custom(img=img, vertices=[[226, 1], [313, 184], [240, 202], [220, 229], [161, 171]])
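    # The custom ROI contour should contain exactly the five supplied vertices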
assert np.shape(cnt) == (1, 5, 2)
def test_plantcv_roi_custom_bad_input():
# Read in test RGB image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# ROI goes out of bounds
with pytest.raises(RuntimeError):
_ = pcv.roi.custom(img=img, vertices=[[226, -1], [3130, 1848], [2404, 2029], [2205, 2298], [1617, 1761]])
# ##############################
# Tests for the transform subpackage
# ##############################
def test_plantcv_transform_get_color_matrix():
# load in target_matrix
matrix_file = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
matrix_compare = matrix_file['arr_0']
# Read in rgb_img and gray-scale mask
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
# The result should be a len(np.unique(mask))-1 x 4 matrix
headers, matrix = pcv.transform.get_color_matrix(rgb_img, mask)
assert np.array_equal(matrix, matrix_compare)
def test_plantcv_transform_get_color_matrix_img():
# Read in two gray-scale images
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
# The input for rgb_img needs to be an RGB image
with pytest.raises(RuntimeError):
_, _ = pcv.transform.get_color_matrix(rgb_img, mask)
def test_plantcv_transform_get_color_matrix_mask():
# Read in two gray-scale images
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK))
    # The input for mask needs to be a gray-scale image
with pytest.raises(RuntimeError):
_, _ = pcv.transform.get_color_matrix(rgb_img, mask)
def test_plantcv_transform_get_matrix_m():
# load in comparison matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_compare_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_compare_b = matrix_b_file['arr_0']
# read in matrices
t_matrix_file = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
t_matrix = t_matrix_file['arr_0']
s_matrix_file = np.load(os.path.join(TEST_DATA, TEST_SOURCE1_MATRIX), encoding="latin1")
s_matrix = s_matrix_file['arr_0']
# apply matrices to function
matrix_a, matrix_m, matrix_b = pcv.transform.get_matrix_m(t_matrix, s_matrix)
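    # Round both the computed and saved reference matrices so the comparison is not affected by floating point noise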
matrix_compare_m = np.rint(matrix_compare_m)
matrix_compare_b = np.rint(matrix_compare_b)
matrix_m = np.rint(matrix_m)
matrix_b = np.rint(matrix_b)
assert np.array_equal(matrix_m, matrix_compare_m) and np.array_equal(matrix_b, matrix_compare_b)
def test_plantcv_transform_get_matrix_m_unequal_data():
# load in comparison matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M2), encoding="latin1")
matrix_compare_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B2), encoding="latin1")
matrix_compare_b = matrix_b_file['arr_0']
# read in matrices
t_matrix_file = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
t_matrix = t_matrix_file['arr_0']
s_matrix_file = np.load(os.path.join(TEST_DATA, TEST_SOURCE2_MATRIX), encoding="latin1")
s_matrix = s_matrix_file['arr_0']
# apply matrices to function
matrix_a, matrix_m, matrix_b = pcv.transform.get_matrix_m(t_matrix, s_matrix)
matrix_compare_m = np.rint(matrix_compare_m)
matrix_compare_b = np.rint(matrix_compare_b)
matrix_m = np.rint(matrix_m)
matrix_b = np.rint(matrix_b)
assert np.array_equal(matrix_m, matrix_compare_m) and np.array_equal(matrix_b, matrix_compare_b)
def test_plantcv_transform_calc_transformation_matrix():
# load in comparison matrices
matrix_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_compare = matrix_file['arr_0']
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
# apply to function
_, matrix_t = pcv.transform.calc_transformation_matrix(matrix_m, matrix_b)
matrix_t = np.rint(matrix_t)
matrix_compare = np.rint(matrix_compare)
assert np.array_equal(matrix_t, matrix_compare)
def test_plantcv_transform_calc_transformation_matrix_b_incorrect():
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
matrix_b = np.asmatrix(matrix_b, float)
with pytest.raises(RuntimeError):
_, _ = pcv.transform.calc_transformation_matrix(matrix_m, matrix_b.T)
def test_plantcv_transform_calc_transformation_matrix_not_mult():
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
with pytest.raises(RuntimeError):
_, _ = pcv.transform.calc_transformation_matrix(matrix_m, matrix_b[:3])
def test_plantcv_transform_calc_transformation_matrix_not_mat():
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
with pytest.raises(RuntimeError):
_, _ = pcv.transform.calc_transformation_matrix(matrix_m[:, 1], matrix_b[:, 1])
def test_plantcv_transform_apply_transformation():
# load corrected image to compare
corrected_compare = cv2.imread(os.path.join(TEST_DATA, TEST_S1_CORRECTED))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
    # Construct an image debug directory path in the cache directory
imgdir = os.path.join(cache_dir, "images")
# read in matrices
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# read in images
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = imgdir
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
# Test with debug = None
pcv.params.debug = None
corrected_img = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
    # Assert that the corrected image matches the expected corrected image
assert np.array_equal(corrected_img, corrected_compare)
def test_plantcv_transform_apply_transformation_incorrect_t():
# read in matrices
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# read in images
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
with pytest.raises(RuntimeError):
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
def test_plantcv_transform_apply_transformation_incorrect_img():
# read in matrices
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# read in images
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
with pytest.raises(RuntimeError):
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
def test_plantcv_transform_save_matrix():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# read in matrix
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# .npz filename
filename = os.path.join(cache_dir, 'test.npz')
pcv.transform.save_matrix(matrix_t, filename)
assert os.path.exists(filename) is True
def test_plantcv_transform_save_matrix_incorrect_filename():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# read in matrix
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# .npz filename
filename = "test"
with pytest.raises(RuntimeError):
pcv.transform.save_matrix(matrix_t, filename)
def test_plantcv_transform_load_matrix():
# read in matrix_t
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# test load function with matrix_t
matrix_t_loaded = pcv.transform.load_matrix(os.path.join(TEST_DATA, TEST_TRANSFORM1))
assert np.array_equal(matrix_t, matrix_t_loaded)
def test_plantcv_transform_correct_color():
# load corrected image to compare
corrected_compare = cv2.imread(os.path.join(TEST_DATA, TEST_S1_CORRECTED))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_correct_color")
os.mkdir(cache_dir)
    # Construct image and saved-matrix directory paths in the cache directory
imgdir = os.path.join(cache_dir, "images")
matdir = os.path.join(cache_dir, "saved_matrices")
# Read in target, source, and gray-scale mask
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
    output_path = matdir
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = imgdir
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, cache_dir)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
# Test with debug = None
pcv.params.debug = None
_, _, matrix_t, corrected_img = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
    # Assert that the corrected image matches the expected result and that the matrix files were saved
assert all([np.array_equal(corrected_img, corrected_compare),
os.path.exists(os.path.join(output_path, "target_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "source_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "transformation_matrix.npz")) is True])
def test_plantcv_transform_correct_color_output_dne():
# load corrected image to compare
corrected_compare = cv2.imread(os.path.join(TEST_DATA, TEST_S1_CORRECTED))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_correct_color_output_dne")
os.mkdir(cache_dir)
    # Construct an image debug directory path in the cache directory
imgdir = os.path.join(cache_dir, "images")
# Read in target, source, and gray-scale mask
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
output_path = os.path.join(cache_dir, "saved_matrices_1") # output_directory that does not currently exist
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = imgdir
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
# Test with debug = None
pcv.params.debug = None
_, _, matrix_t, corrected_img = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
    # Assert that the corrected image matches the expected result and that the matrix files were saved
assert all([np.array_equal(corrected_img, corrected_compare),
os.path.exists(os.path.join(output_path, "target_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "source_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "transformation_matrix.npz")) is True])
def test_plantcv_transform_create_color_card_mask():
# Load target image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_create_color_card_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166),
spacing=(21, 21), nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166),
spacing=(21, 21), nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = None
pcv.params.debug = None
mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166),
spacing=(21, 21), nrows=6, ncols=4, exclude=[20, 0])
    assert all(i == j for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110,
                                                                 120, 130, 140, 150, 160, 170, 180, 190, 200, 210,
                                                                 220], dtype=np.uint8)))
def test_plantcv_transform_quick_color_check():
# Load target image
t_matrix = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
target_matrix = t_matrix['arr_0']
s_matrix = np.load(os.path.join(TEST_DATA, TEST_SOURCE1_MATRIX), encoding="latin1")
source_matrix = s_matrix['arr_0']
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_quick_color_check")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
pcv.transform.quick_color_check(target_matrix, source_matrix, num_chips=22)
# Test with debug = "plot"
pcv.params.debug = "plot"
pcv.transform.quick_color_check(target_matrix, source_matrix, num_chips=22)
# Test with debug = None
pcv.params.debug = None
pcv.transform.quick_color_check(target_matrix, source_matrix, num_chips=22)
assert os.path.exists(os.path.join(cache_dir, "color_quick_check.png"))
def test_plantcv_transform_find_color_card():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
df, start, space = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='adaptgauss', blurry=False,
threshvalue=90)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start,
spacing=space, nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start,
spacing=space, nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = None
pcv.params.debug = None
mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start,
spacing=space, nrows=6, ncols=4, exclude=[20, 0])
    assert all(i == j for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110,
                                                                 120, 130, 140, 150, 160, 170, 180, 190, 200, 210,
                                                                 220], dtype=np.uint8)))
def test_plantcv_transform_find_color_card_optional_parameters():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
    # Test with threshold_type='normal'
df1, start1, space1 = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='normal', blurry=True,
background='light', threshvalue=90, label="prefix")
assert pcv.outputs.observations["prefix"]["color_chip_size"]["value"] > 15000
def test_plantcv_transform_find_color_card_otsu():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card_otsu")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
    # Test with threshold_type='otsu'
df1, start1, space1 = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='otsu', blurry=True,
background='light', threshvalue=90, label="prefix")
assert pcv.outputs.observations["prefix"]["color_chip_size"]["value"] > 15000
def test_plantcv_transform_find_color_card_optional_size_parameters():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, record_chip_size="mean")
assert pcv.outputs.observations["default"]["color_chip_size"]["value"] > 15000
def test_plantcv_transform_find_color_card_optional_size_parameters_none():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, record_chip_size=None)
assert pcv.outputs.observations.get("default") is None
def test_plantcv_transform_find_color_card_bad_record_chip_size():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, record_chip_size='averageeeed')
assert pcv.outputs.observations["default"]["color_chip_size"]["value"] is None
def test_plantcv_transform_find_color_card_bad_thresh_input():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='gaussian')
def test_plantcv_transform_find_color_card_bad_background_input():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, background='lite')
def test_plantcv_transform_find_color_card_bad_colorcard():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_WITH_HEXAGON))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img)
def test_plantcv_transform_rescale():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_rescale")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.rescale(gray_img=gray_img, min_value=0, max_value=100)
pcv.params.debug = "plot"
rescaled_img = pcv.transform.rescale(gray_img=gray_img, min_value=0, max_value=100)
assert max(np.unique(rescaled_img)) == 100
def test_plantcv_transform_rescale_bad_input():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
with pytest.raises(RuntimeError):
_ = pcv.transform.rescale(gray_img=rgb_img)
def test_plantcv_transform_resize():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_resize")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
size = (100, 100)
# Test with debug "print"
pcv.params.debug = "print"
_ = pcv.transform.resize(img=gray_img, size=size, interpolation="auto")
# Test with debug "plot"
pcv.params.debug = "plot"
resized_img = pcv.transform.resize(img=gray_img, size=size, interpolation="auto")
assert resized_img.shape == size
def test_plantcv_transform_resize_unsupported_method():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
with pytest.raises(RuntimeError):
_ = pcv.transform.resize(img=gray_img, size=(100, 100), interpolation="mymethod")
def test_plantcv_transform_resize_crop():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
size = (20, 20)
resized_im = pcv.transform.resize(img=gray_img, size=size, interpolation=None)
assert resized_im.shape == size
def test_plantcv_transform_resize_pad():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
size = (100, 100)
resized_im = pcv.transform.resize(img=gray_img, size=size, interpolation=None)
assert resized_im.shape == size
def test_plantcv_transform_resize_pad_crop_color():
color_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL))
size = (100, 100)
resized_im = pcv.transform.resize(img=color_img, size=size, interpolation=None)
assert resized_im.shape == (size[1], size[0], 3)
def test_plantcv_transform_resize_factor():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_resize_factor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
# Resizing factors
factor_x = 0.5
factor_y = 0.2
# Test with debug "print"
pcv.params.debug = "print"
_ = pcv.transform.resize_factor(img=gray_img, factors=(factor_x, factor_y), interpolation="auto")
# Test with debug "plot"
pcv.params.debug = "plot"
resized_img = pcv.transform.resize_factor(img=gray_img, factors=(factor_x, factor_y), interpolation="auto")
output_size = resized_img.shape
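    # Image shape is (rows, cols), so rows scale with factor_y and columns with factor_x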
expected_size = (int(gray_img.shape[0] * factor_y), int(gray_img.shape[1] * factor_x))
assert output_size == expected_size
def test_plantcv_transform_resize_factor_bad_input():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
with pytest.raises(RuntimeError):
_ = pcv.transform.resize_factor(img=gray_img, factors=(0, 2), interpolation="auto")
def test_plantcv_transform_nonuniform_illumination_rgb():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_nonuniform_illumination")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
pcv.params.debug = "plot"
_ = pcv.transform.nonuniform_illumination(img=rgb_img, ksize=11)
pcv.params.debug = "print"
corrected = pcv.transform.nonuniform_illumination(img=rgb_img, ksize=11)
assert np.mean(corrected) < np.mean(rgb_img)
def test_plantcv_transform_nonuniform_illumination_gray():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_nonuniform_illumination")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Load rgb image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "plot"
_ = pcv.transform.nonuniform_illumination(img=gray_img, ksize=11)
pcv.params.debug = "print"
corrected = pcv.transform.nonuniform_illumination(img=gray_img, ksize=11)
assert np.shape(corrected) == np.shape(gray_img)
def test_plantcv_transform_warp_default():
pcv.params.debug = "plot"
img = create_test_img((12, 10, 3))
refimg = create_test_img((12, 10, 3))
    pts = [(0, 0), (1, 0), (0, 3), (4, 4)]
    refpts = [(0, 0), (1, 0), (0, 3), (4, 4)]
warped_img, mat = pcv.transform.warp(img, refimg, pts, refpts, method="default")
assert mat.shape == (3, 3)
def test_plantcv_transform_warp_lmeds():
pcv.params.debug = "plot"
img = create_test_img((10, 10, 3))
refimg = create_test_img((11, 11))
pts = [(0, 0), (1, 0), (0, 3), (4, 4)]
refpts = [(0, 0), (1, 0), (0, 3), (4, 4)]
warped_img, mat = pcv.transform.warp(img, refimg, pts, refpts, method="lmeds")
assert mat.shape == (3, 3)
def test_plantcv_transform_warp_rho():
pcv.params.debug = "plot"
img = create_test_img_bin((10, 10))
refimg = create_test_img((11, 11))
pts = [(0, 0), (1, 0), (0, 3), (4, 4)]
refpts = [(0, 0), (1, 0), (0, 3), (4, 4)]
warped_img, mat = pcv.transform.warp(img, refimg, pts, refpts, method="rho")
assert mat.shape == (3, 3)
def test_plantcv_transform_warp_ransac():
pcv.params.debug = "plot"
img = create_test_img((100, 150))
refimg = create_test_img((10, 15))
pts = [(0, 0), (149, 0), (99, 149), (0, 99), (3, 3)]
refpts = [(0, 0), (0, 14), (9, 14), (0, 9), (3, 3)]
warped_img, mat = pcv.transform.warp(img, refimg, pts, refpts, method="ransac")
assert mat.shape == (3, 3)
@pytest.mark.parametrize("pts, refpts", [
    [[(0, 0)], [(0, 0), (0, 1)]],  # different # of points provided for img and refimg
    [[(0, 0)], [(0, 0)]],  # not enough pairs of points provided
[[(0, 0), (0, 14), (9, 14), (0, 9), (3, 3)],
[(0, 0), (149, 0), (99, 149), (0, 99), (3, 3)]] # homography not able to be calculated (cannot converge)
])
def test_plantcv_transform_warp_err(pts, refpts):
img = create_test_img((10, 15))
refimg = create_test_img((100, 150))
method = "rho"
with pytest.raises(RuntimeError):
pcv.transform.warp(img, refimg, pts, refpts, method=method)
def test_plantcv_transform_warp_align():
img = create_test_img((10, 10, 3))
refimg = create_test_img((11, 11))
mat = np.array([[ 1.00000000e+00, 1.04238500e-15, -7.69185075e-16],
[ 1.44375646e-16, 1.00000000e+00, 0.00000000e+00],
[-5.41315251e-16, 1.78930521e-15, 1.00000000e+00]])
warp_img = pcv.transform.warp_align(img=img, mat=mat, refimg=refimg)
assert warp_img.shape == (11, 11, 3)
# ##############################
# Tests for the threshold subpackage
# ##############################
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_binary(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with the parametrized object_type
pcv.params.debug = None
binary_img = pcv.threshold.binary(gray_img=gray_img, threshold=25, max_value=255, object_type=objtype)
    # Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(binary_img), [0, 255]))
def test_plantcv_threshold_binary_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.binary(gray_img=gray_img, threshold=25, max_value=255, object_type="lite")
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_gaussian(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with the parametrized object_type
pcv.params.debug = None
binary_img = pcv.threshold.gaussian(gray_img=gray_img, max_value=255, object_type=objtype)
    # Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(binary_img), [0, 255]))
def test_plantcv_threshold_gaussian_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.gaussian(gray_img=gray_img, max_value=255, object_type="lite")
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_mean(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with the parametrized object_type
pcv.params.debug = None
binary_img = pcv.threshold.mean(gray_img=gray_img, max_value=255, object_type=objtype)
    # Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(binary_img), [0, 255]))
def test_plantcv_threshold_mean_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.mean(gray_img=gray_img, max_value=255, object_type="lite")
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_otsu(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GREENMAG), -1)
    # Test with the parametrized object_type
pcv.params.debug = None
binary_img = pcv.threshold.otsu(gray_img=gray_img, max_value=255, object_type=objtype)
    # Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(binary_img), np.shape(gray_img)))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(binary_img), [0, 255]))
def test_plantcv_threshold_otsu_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.otsu(gray_img=gray_img, max_value=255, object_type="lite")
@pytest.mark.parametrize("channel,lower_thresh,upper_thresh", [["HSV", [0, 0, 0], [255, 255, 255]],
["LAB", [0, 0, 0], [255, 255, 255]],
["RGB", [0, 0, 0], [255, 255, 255]],
["GRAY", [0], [255]]])
def test_plantcv_threshold_custom_range_rgb(channel, lower_thresh, upper_thresh):
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = None
pcv.params.debug = None
mask, binary_img = pcv.threshold.custom_range(img, lower_thresh=lower_thresh, upper_thresh=upper_thresh,
channel=channel)
    # Assert that the binary mask has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(mask), np.shape(img)[:2]))
    # Assert that the mask contains only binary values
    assert all(v in (0, 255) for v in np.unique(mask))
def test_plantcv_threshold_custom_range_grayscale():
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = None
pcv.params.debug = None
    # Test channel='gray'
mask, binary_img = pcv.threshold.custom_range(gray_img, lower_thresh=[0], upper_thresh=[255], channel='gray')
    # Assert that the binary mask has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(mask), np.shape(gray_img)))
    # Assert that the mask contains only binary values
    assert all(v in (0, 255) for v in np.unique(mask))
def test_plantcv_threshold_custom_range_bad_input_hsv():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2, 2, 2, 2], channel='HSV')
def test_plantcv_threshold_custom_range_bad_input_rgb():
# Read in test data
pcv.params.debug = None
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2, 2, 2, 2], channel='RGB')
def test_plantcv_threshold_custom_range_bad_input_lab():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2, 2, 2], channel='LAB')
def test_plantcv_threshold_custom_range_bad_input_gray():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2], channel='gray')
def test_plantcv_threshold_custom_range_bad_input_channel():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0], upper_thresh=[2], channel='CMYK')
@pytest.mark.parametrize("channel", ["all", "any"])
def test_plantcv_threshold_saturation(channel):
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = None
pcv.params.debug = None
thresh = pcv.threshold.saturation(rgb_img=rgb_img, threshold=254, channel=channel)
assert len(np.unique(thresh)) == 2
def test_plantcv_threshold_saturation_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_ = pcv.threshold.saturation(rgb_img=rgb_img, threshold=254, channel="red")
def test_plantcv_threshold_triangle():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_threshold_triangle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = None
_ = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="dark", xstep=10)
pcv.params.debug = "plot"
_ = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="light", xstep=10)
pcv.params.debug = "print"
binary_img = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="light", xstep=10)
    # Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(binary_img), [0, 255]))
def test_plantcv_threshold_triangle_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="lite", xstep=10)
def test_plantcv_threshold_texture():
# Test with debug = None
pcv.params.debug = None
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
binary_img = pcv.threshold.texture(gray_img, ksize=6, threshold=7, offset=3, texture_method='dissimilarity',
borders='nearest', max_value=255)
    # Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(binary_img), np.shape(gray_img)))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(binary_img), [0, 255]))
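# Helper: generate a synthetic image of the requested shape filled with pseudo-random uint8 values for the warp tests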
def create_test_img(sz_img):
img = np.random.randint(np.prod(sz_img), size=sz_img) * 255
img = img.astype(np.uint8)
return img
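# Helper: generate a small synthetic binary image with a rectangular block of foreground pixels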
def create_test_img_bin(sz_img):
img = np.zeros(sz_img)
img[3:7, 2:8] = 1
return img
@pytest.mark.parametrize("bad_type", ["native", "nan", "inf"])
def test_plantcv_threshold_mask_bad(bad_type):
# Create a synthetic bad image
bad_img = np.reshape(np.random.rand(25), (5, 5))
bad_img[2, 2] = np.inf
bad_img[2, 3] = np.nan
sz = np.shape(bad_img)
pcv.params.debug = None
mask = pcv.threshold.mask_bad(bad_img, bad_type=bad_type)
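    # The mask should keep the input shape and contain exactly two unique values (bad pixels flagged vs. not)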
assert((np.shape(mask) == sz) and (len(np.unique(mask)) == 2))
def test_plantcv_threshold_mask_bad_native_bad_input():
# Create a synthetic bad image
bad_img = np.reshape(np.random.rand(25), (5, 5))
sz = np.shape(bad_img)
mask10 = pcv.threshold.mask_bad(bad_img, bad_type='native')
    assert np.array_equal(mask10, np.zeros(sz, dtype='uint8'))
def test_plantcv_threshold_mask_bad_nan_bad_input():
# Create a synthetic bad image
bad_img = np.reshape(np.random.rand(25), (5, 5))
bad_img[2, 2] = np.inf
sz = np.shape(bad_img)
mask11 = pcv.threshold.mask_bad(bad_img, bad_type='nan')
    assert np.array_equal(mask11, np.zeros(sz, dtype='uint8'))
def test_plantcv_threshold_mask_bad_input_color_img():
# Read in test data
bad_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.threshold.mask_bad(bad_img, bad_type='nan')
# ###################################
# Tests for the visualize subpackage
# ###################################
def test_plantcv_visualize_auto_threshold_methods_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_threshold_methods")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_ = pcv.visualize.auto_threshold_methods(gray_img=img)
def test_plantcv_visualize_auto_threshold_methods():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_threshold_methods")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "print"
_ = pcv.visualize.auto_threshold_methods(gray_img=img)
pcv.params.debug = "plot"
labeled_imgs = pcv.visualize.auto_threshold_methods(gray_img=img)
assert len(labeled_imgs) == 5 and np.shape(labeled_imgs[0])[0] == np.shape(img)[0]
@pytest.mark.parametrize("debug,axes", [["print", True], ["plot", False]])
def test_plantcv_visualize_pseudocolor(debug, axes, tmpdir):
# Create a tmp directory
cache_dir = tmpdir.mkdir("sub")
pcv.params.debug_outdir = cache_dir
# Input image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
r, c = img.shape
    # generate 100 "bad" pixels
mask_bad = np.zeros((r, c), dtype=np.uint8)
mask_bad = np.reshape(mask_bad, (-1, 1))
mask_bad[0:100] = 255
mask_bad = np.reshape(mask_bad, (r, c))
# Debug mode
pcv.params.debug = debug
pseudo_img = pcv.visualize.pseudocolor(gray_img=img, mask=None, title="Pseudocolored image", axes=axes,
bad_mask=mask_bad)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(pseudo_img), TEST_BINARY_DIM))
@pytest.mark.parametrize("bkgrd,axes,pad", [["image", True, "auto"], ["white", False, 1], ["black", True, "auto"]])
def test_plantcv_visualize_pseudocolor_mask(bkgrd, axes, pad):
# Test with debug = None
pcv.params.debug = None
# Input image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Input mask
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Input contours
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
r, c = img.shape
    # generate 100 "bad" pixels
mask_bad = np.zeros((r, c), dtype=np.uint8)
mask_bad = np.reshape(mask_bad, (-1, 1))
mask_bad[0:100] = 255
mask_bad = np.reshape(mask_bad, (r, c))
pseudo_img = pcv.visualize.pseudocolor(gray_img=img, obj=obj_contour, mask=mask, background=bkgrd,
bad_mask=mask_bad, title="Pseudocolored image", axes=axes, obj_padding=pad)
    # Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(pseudo_img), TEST_BINARY_DIM))
def test_plantcv_visualize_pseudocolor_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_ = pcv.visualize.pseudocolor(gray_img=img)
def test_plantcv_visualize_pseudocolor_bad_background():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor_bad_background")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.visualize.pseudocolor(gray_img=img, mask=mask, background="pink")
def test_plantcv_visualize_pseudocolor_bad_padding():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor_bad_background")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
with pytest.raises(RuntimeError):
_ = pcv.visualize.pseudocolor(gray_img=img, mask=mask, obj=obj_contour, obj_padding="pink")
def test_plantcv_visualize_pseudocolor_bad_mask():
# Test with debug = None
pcv.params.debug = None
def test_plantcv_visualize_colorize_masks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_naive_bayes_classifier")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']],
colors=[(0, 0, 0), (1, 1, 1)])
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']],
colors=[(0, 0, 0), (1, 1, 1)])
# Test with debug = None
pcv.params.debug = None
colored_img = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']],
colors=['red', 'blue'])
# Assert that the output image has the dimensions of the input image
assert not np.average(colored_img) == 0
def test_plantcv_visualize_colorize_masks_bad_input_empty():
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorize_masks(masks=[], colors=[])
def test_plantcv_visualize_colorize_masks_bad_input_mismatch_number():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']], colors=['red', 'green', 'blue'])
def test_plantcv_visualize_colorize_masks_bad_color_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']], colors=['red', 1.123])
def test_plantcv_visualize_colorize_label_img():
    label_img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
pcv.params.debug = None
colored_img = pcv.visualize.colorize_label_img(label_img)
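    # Each label value is mapped to an RGB color, so the output adds a trailing 3-channel color axis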
assert (colored_img.shape[0:-1] == label_img.shape) and colored_img.shape[-1] == 3
@pytest.mark.parametrize("bins,lb,ub,title", [[200, 0, 255, "Include Title"], [100, None, None, None]])
def test_plantcv_visualize_histogram(bins, lb, ub, title):
# Test with debug = None
pcv.params.debug = None
# Read test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
fig_hist, hist_df = pcv.visualize.histogram(img=img, mask=mask, bins=bins, lower_bound=lb, upper_bound=ub,
title=title, hist_data=True)
assert all([isinstance(fig_hist, ggplot), isinstance(hist_df, pd.core.frame.DataFrame)])
def test_plantcv_visualize_histogram_no_mask():
# Test with debug = None
pcv.params.debug = None
# Read test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
fig_hist = pcv.visualize.histogram(img=img, mask=None)
assert isinstance(fig_hist, ggplot)
def test_plantcv_visualize_histogram_rgb_img():
# Test with debug = None
pcv.params.debug = None
# Test RGB input image
img_rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
fig_hist = pcv.visualize.histogram(img=img_rgb)
assert isinstance(fig_hist, ggplot)
def test_plantcv_visualize_histogram_multispectral_img():
# Test with debug = None
pcv.params.debug = None
# Test multi-spectral image
img_rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
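    # Stack the RGB image with itself along the channel axis to simulate a 6-band multispectral image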
img_multi = np.concatenate((img_rgb, img_rgb), axis=2)
fig_hist = pcv.visualize.histogram(img=img_multi)
assert isinstance(fig_hist, ggplot)
def test_plantcv_visualize_histogram_no_img():
with pytest.raises(RuntimeError):
_ = pcv.visualize.histogram(img=None)
def test_plantcv_visualize_histogram_array():
# Read test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.visualize.histogram(img=img[0, :])
def test_plantcv_visualize_clustered_contours():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_hist")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_BACKGROUND), -1)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_CONTOUR), encoding="latin1")
hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_HIERARCHY), encoding="latin1")
cluster_i = np.load(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_CLUSTERS), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
obj_hierarchy = hierarchy['arr_0']
cluster = [cluster_i[arr_n] for arr_n in cluster_i]
# Test in plot mode
pcv.params.debug = "plot"
# Reset the saved color scale (can be saved between tests)
pcv.params.saved_color_scale = None
_ = pcv.visualize.clustered_contours(img=img1, grouped_contour_indices=cluster, roi_objects=objs,
roi_obj_hierarchy=obj_hierarchy, bounding=False)
# Test in print mode
pcv.params.debug = "print"
# Reset the saved color scale (can be saved between tests)
pcv.params.saved_color_scale = None
cluster_img = pcv.visualize.clustered_contours(img=img, grouped_contour_indices=cluster, roi_objects=objs,
roi_obj_hierarchy=obj_hierarchy, nrow=2, ncol=2, bounding=True)
assert np.sum(cluster_img) > np.sum(img)
def test_plantcv_visualize_colorspaces():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_hist")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = "plot"
vis_img_small = pcv.visualize.colorspaces(rgb_img=img, original_img=False)
pcv.params.debug = "print"
vis_img = pcv.visualize.colorspaces(rgb_img=img)
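    # The colorspace visualization concatenates the channel views, so it should be wider than the input image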
assert np.shape(vis_img)[1] > (np.shape(img)[1]) and np.shape(vis_img_small)[1] > (np.shape(img)[1])
def test_plantcv_visualize_colorspaces_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_hist")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorspaces(rgb_img=img)
def test_plantcv_visualize_overlay_two_imgs():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
pcv.params.debug = None
out_img = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2)
sample_pt1 = img1[1445, 1154]
sample_pt2 = img2[1445, 1154]
sample_pt3 = out_img[1445, 1154]
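    # With no alpha supplied, the expected output pixel is an equal 50/50 blend of the two input pixels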
pred_rgb = (sample_pt1 * 0.5) + (sample_pt2 * 0.5)
pred_rgb = pred_rgb.astype(np.uint8)
assert np.array_equal(sample_pt3, pred_rgb)
def test_plantcv_visualize_overlay_two_imgs_grayscale():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_grayscale")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
out_img = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2)
sample_pt1 = np.array([255, 255, 255], dtype=np.uint8)
sample_pt2 = np.array([255, 255, 255], dtype=np.uint8)
sample_pt3 = out_img[1445, 1154]
pred_rgb = (sample_pt1 * 0.5) + (sample_pt2 * 0.5)
pred_rgb = pred_rgb.astype(np.uint8)
assert np.array_equal(sample_pt3, pred_rgb)
def test_plantcv_visualize_overlay_two_imgs_bad_alpha():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_bad_alpha")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
alpha = -1
with pytest.raises(RuntimeError):
_ = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2, alpha=alpha)
def test_plantcv_visualize_overlay_two_imgs_size_mismatch():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_size_mismatch")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED))
with pytest.raises(RuntimeError):
_ = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2)
@pytest.mark.parametrize("title", ["Include Title", None])
def test_plantcv_visualize_obj_size_ecdf(title):
pcv.params.debug = None
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
fig_ecdf = plantcv.plantcv.visualize.obj_size_ecdf(mask=mask, title=title)
assert isinstance(fig_ecdf, ggplot)
# ##############################
# Tests for the utils subpackage
# ##############################
def test_plantcv_utils_json2csv():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_json2csv")
os.mkdir(cache_dir)
plantcv.utils.json2csv(json_file=os.path.join(TEST_DATA, "merged_output.json"),
csv_file=os.path.join(cache_dir, "exports"))
assert all([os.path.exists(os.path.join(cache_dir, "exports-single-value-traits.csv")),
os.path.exists(os.path.join(cache_dir, "exports-multi-value-traits.csv"))])
def test_plantcv_utils_json2csv_no_json():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_json2csv_no_json")
os.mkdir(cache_dir)
with pytest.raises(IOError):
plantcv.utils.json2csv(json_file=os.path.join(TEST_DATA, "not_a_file.json"),
csv_file=os.path.join(cache_dir, "exports"))
def test_plantcv_utils_json2csv_bad_json():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_json2csv_bad_json")
os.mkdir(cache_dir)
with pytest.raises(ValueError):
plantcv.utils.json2csv(json_file=os.path.join(TEST_DATA, "incorrect_json_data.txt"),
csv_file=os.path.join(cache_dir, "exports"))
def test_plantcv_utils_sample_images_snapshot():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
snapshot_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
img_outdir = os.path.join(cache_dir, "snapshot")
plantcv.utils.sample_images(source_path=snapshot_dir, dest_path=img_outdir, num=3)
assert os.path.exists(os.path.join(cache_dir, "snapshot"))
def test_plantcv_utils_sample_images_flatdir():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
flat_dir = os.path.join(TEST_DATA)
img_outdir = os.path.join(cache_dir, "images")
plantcv.utils.sample_images(source_path=flat_dir, dest_path=img_outdir, num=30)
random_images = os.listdir(img_outdir)
assert all([len(random_images) == 30, len(np.unique(random_images)) == 30])
def test_plantcv_utils_sample_images_bad_source():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
fake_dir = os.path.join(TEST_DATA, "snapshot")
img_outdir = os.path.join(cache_dir, "images")
with pytest.raises(IOError):
plantcv.utils.sample_images(source_path=fake_dir, dest_path=img_outdir, num=3)
def test_plantcv_utils_sample_images_bad_flat_num():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
flat_dir = os.path.join(TEST_DATA)
img_outdir = os.path.join(cache_dir, "images")
with pytest.raises(RuntimeError):
plantcv.utils.sample_images(source_path=flat_dir, dest_path=img_outdir, num=300)
def test_plantcv_utils_sample_images_bad_phenofront_num():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
snapshot_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
img_outdir = os.path.join(cache_dir, "images")
with pytest.raises(RuntimeError):
plantcv.utils.sample_images(source_path=snapshot_dir, dest_path=img_outdir, num=300)
def test_plantcv_utils_tabulate_bayes_classes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_tabulate_bayes_classes")
os.mkdir(cache_dir)
outfile = os.path.join(cache_dir, "rgb_table.txt")
plantcv.utils.tabulate_bayes_classes(input_file=os.path.join(TEST_DATA, PIXEL_VALUES), output_file=outfile)
table = pd.read_csv(outfile, sep="\t")
assert table.shape == (228, 2)
def test_plantcv_utils_tabulate_bayes_classes_missing_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_tabulate_bayes_classes_missing_input")
os.mkdir(cache_dir)
outfile = os.path.join(cache_dir, "rgb_table.txt")
with pytest.raises(IOError):
plantcv.utils.tabulate_bayes_classes(input_file=os.path.join(PIXEL_VALUES), output_file=outfile)
# ##############################
# Clean up test files
# ##############################
def teardown_function():
shutil.rmtree(TEST_TMPDIR)
| mit | -1,479,096,808,573,593,300 | 42.811997 | 229 | 0.650335 | false |
spectralpython/spectral | spectral/database/aster.py | 1 | 15620 | '''
Code for reading and managing ASTER spectral library data.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
from spectral.utilities.python23 import IS_PYTHON3, tobytes, frombytes
from .spectral_database import SpectralDatabase
if IS_PYTHON3:
readline = lambda fin: fin.readline()
open_file = lambda filename: open(filename, encoding='iso-8859-1')
else:
readline = lambda fin: fin.readline().decode('iso-8859-1')
open_file = lambda filename: open(filename)
table_schemas = [
'CREATE TABLE Samples (SampleID INTEGER PRIMARY KEY, Name TEXT, Type TEXT, Class TEXT, SubClass TEXT, '
'ParticleSize TEXT, SampleNum TEXT, Owner TEXT, Origin TEXT, Phase TEXT, Description TEXT)',
'CREATE TABLE Spectra (SpectrumID INTEGER PRIMARY KEY, SampleID INTEGER, SensorCalibrationID INTEGER, '
'Instrument TEXT, Environment TEXT, Measurement TEXT, '
'XUnit TEXT, YUnit TEXT, MinWavelength FLOAT, MaxWavelength FLOAT, '
'NumValues INTEGER, XData BLOB, YData BLOB)',
]
arraytypecode = chr(ord('f'))
# These files contained malformed signature data and will be ignored.
bad_files = [
'jhu.nicolet.mineral.silicate.tectosilicate.fine.albite1.spectrum.txt',
'usgs.perknic.rock.igneous.mafic.colid.me3.spectrum.txt'
]
def read_pair(fin, num_lines=1):
'''Reads a colon-delimited attribute-value pair from the file stream.'''
s = ''
for i in range(num_lines):
s += " " + readline(fin).strip()
return [x.strip().lower() for x in s.split(':')]
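# read_pair example (illustrative): a header line such as "Name: Albite"
# yields ['name', 'albite']; both key and value are stripped and lower-cased.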
class Signature:
'''Object to store sample/measurement metadata, as well as wavelength-signature vectors.'''
def __init__(self):
self.sample = {}
self.measurement = {}
def read_aster_file(filename):
'''Reads an ASTER 2.x spectrum file.'''
fin = open_file(filename)
s = Signature()
# Number of lines per metadata attribute value
lpv = [1] * 8 + [2] + [6]
# A few files have an additional "Collected by" sample metadata field, which
# sometimes affects the number of header lines
haveCollectedBy = False
for i in range(30):
line = readline(fin).strip()
if line.find('Collected by:') >= 0:
haveCollectedBy = True
collectedByLineNum = i
if line.startswith('Description:'):
descriptionLineNum = i
if line.startswith('Measurement:'):
measurementLineNum = i
if haveCollectedBy:
lpv = [1] * 10 + [measurementLineNum - descriptionLineNum]
# Read sample metadata
fin.seek(0)
for i in range(len(lpv)):
pair = read_pair(fin, lpv[i])
s.sample[pair[0].lower()] = pair[1]
# Read measurement metadata
lpv = [1] * 8 + [2]
for i in range(len(lpv)):
pair = read_pair(fin, lpv[i])
if len(pair) < 2:
print(pair)
s.measurement[pair[0].lower()] = pair[1]
# Read signature spectrum
pairs = []
for line in fin.readlines():
line = line.strip()
if len(line) == 0:
continue
pair = line.split()
nItems = len(pair)
# Try to handle invalid values on signature lines
if nItems == 1:
# print 'single item (%s) on signature line, %s' \
# % (pair[0], filename)
continue
elif nItems > 2:
print('more than 2 values on signature line,', filename)
continue
try:
x = float(pair[0])
except ValueError:
print('corrupt signature line,', filename)
continue
if x == 0:
# print 'Zero wavelength value', filename
continue
elif x < 0:
print('Negative wavelength value,', filename)
continue
pairs.append(pair)
[x, y] = [list(v) for v in zip(*pairs)]
# Make sure wavelengths are ascending
if float(x[0]) > float(x[-1]):
x.reverse()
y.reverse()
s.x = [float(val) for val in x]
s.y = [float(val) for val in y]
s.measurement['first x value'] = x[0]
s.measurement['last x value'] = x[-1]
s.measurement['number of x values'] = len(x)
fin.close()
return s
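# Illustrative use of read_aster_file (the file name below is hypothetical):
# sig = read_aster_file('some.mineral.spectrum.txt')
# print(sig.sample['name'], sig.measurement['number of x values'])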
class AsterDatabase(SpectralDatabase):
'''A relational database to manage ASTER spectral library data.'''
schemas = table_schemas
def _add_sample(self, name, sampleType, sampleClass, subClass,
particleSize, sampleNumber, owner, origin, phase,
description):
sql = '''INSERT INTO Samples (Name, Type, Class, SubClass, ParticleSize, SampleNum, Owner, Origin, Phase, Description)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''
self.cursor.execute(sql, (name, sampleType, sampleClass, subClass,
particleSize, sampleNumber, owner, origin,
phase, description))
rowId = self.cursor.lastrowid
self.db.commit()
return rowId
def _add_signature(
self, sampleID, calibrationID, instrument, environment, measurement,
xUnit, yUnit, minWavelength, maxWavelength, xData, yData):
import sqlite3
import array
sql = '''INSERT INTO Spectra (SampleID, SensorCalibrationID, Instrument,
Environment, Measurement, XUnit, YUnit, MinWavelength, MaxWavelength,
NumValues, XData, YData) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''
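# Pack the wavelength (x) and signature (y) vectors as float arrays and
# store them as sqlite BLOBs.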
xBlob = sqlite3.Binary(tobytes(array.array(arraytypecode, xData)))
yBlob = sqlite3.Binary(tobytes(array.array(arraytypecode, yData)))
numValues = len(xData)
self.cursor.execute(
sql, (
sampleID, calibrationID, instrument, environment, measurement,
xUnit, yUnit, minWavelength, maxWavelength, numValues, xBlob,
yBlob))
rowId = self.cursor.lastrowid
self.db.commit()
return rowId
@classmethod
def create(cls, filename, aster_data_dir=None):
'''Creates an ASTER relational database by parsing ASTER data files.
Arguments:
`filename` (str):
Name of the new sqlite database file to create.
`aster_data_dir` (str):
Path to the directory containing ASTER library data files. If
this argument is not provided, no data will be imported.
Returns:
An :class:`~spectral.database.AsterDatabase` object.
Example::
>>> AsterDatabase.create("aster_lib.db", "/CDROM/ASTER2.0/data")
This is a class method (it does not require instantiating an
AsterDatabase object) that creates a new database by parsing all of the
files in the ASTER library data directory. Normally, this should only
need to be called once. Subsequently, a corresponding database object
can be created by instantiating a new AsterDatabase object with the
path to the database file as its argument. For example::
>>> from spectral.database.aster import AsterDatabase
>>> db = AsterDatabase("aster_lib.db")
'''
import os
if os.path.isfile(filename):
raise Exception('Error: Specified file already exists.')
db = cls()
db._connect(filename)
for schema in cls.schemas:
db.cursor.execute(schema)
if aster_data_dir:
db._import_files(aster_data_dir)
return db
def __init__(self, sqlite_filename=None):
'''Creates a database object to interface an existing database.
Arguments:
`sqlite_filename` (str):
Name of the database file. If this argument is not provided,
an interface to a database file will not be established.
Returns:
An :class:`~spectral.AsterDatabase` connected to the database.
'''
from spectral.io.spyfile import find_file_path
if sqlite_filename:
self._connect(find_file_path(sqlite_filename))
else:
self.db = None
self.cursor = None
def read_file(self, filename):
return read_aster_file(filename)
def _import_files(self, data_dir, ignore=bad_files):
'''Read the ASTER library data files and import them into the database.'''
from glob import glob
import numpy
import os
if not os.path.isdir(data_dir):
raise Exception('Error: Invalid directory name specified.')
if ignore is not None:
filesToIgnore = [data_dir + '/' + f for f in ignore]
else:
filesToIgnore = []
numFiles = 0
numIgnored = 0
sigID = 1
class Sig:
pass
sigs = []
for f in glob(data_dir + '/*spectrum.txt'):
if f in filesToIgnore:
numIgnored += 1
continue
print('Importing %s.' % f)
numFiles += 1
sig = self.read_file(f)
s = sig.sample
if s['particle size'].lower() == 'liquid':
phase = 'liquid'
else:
phase = 'solid'
if 'sample no.' in s:
sampleNum = s['sample no.']
else:
sampleNum = ''
id = self._add_sample(
s['name'], s['type'], s['class'], s[
'subclass'], s['particle size'],
sampleNum, s['owner'], s['origin'], phase, s['description'])
instrument = os.path.basename(f).split('.')[1]
environment = 'lab'
m = sig.measurement
# Correct numerous misspellings of "reflectance" and "transmittance"
yUnit = m['y units']
if yUnit.find('reflectence') > -1:
yUnit = 'reflectance (percent)'
elif yUnit.find('trans') == 0:
yUnit = 'transmittance (percent)'
measurement = m['measurement']
if measurement[0] == 't':
measurement = 'transmittance'
self._add_signature(id, -1, instrument, environment, measurement,
m['x units'], yUnit, m['first x value'],
m['last x value'], sig.x, sig.y)
if numFiles == 0:
print('No data files were found in directory "%s".' \
% data_dir)
else:
print('Processed %d files.' % numFiles)
if numIgnored > 0:
print('Ignored the following %d bad files:' % (numIgnored))
for f in filesToIgnore:
print('\t' + f)
return sigs
def get_spectrum(self, spectrumID):
'''Returns a spectrum from the database.
Usage:
(x, y) = aster.get_spectrum(spectrumID)
Arguments:
`spectrumID` (int):
The **SpectrumID** value for the desired spectrum from the
**Spectra** table in the database.
Returns:
`x` (list):
Band centers for the spectrum.
`y` (list):
Spectrum data values for each band.
Returns a pair of vectors containing the wavelengths and measured
values of a measurement. For additional metadata, call
"get_signature" instead.
'''
import array
query = '''SELECT XData, YData FROM Spectra WHERE SpectrumID = ?'''
result = self.cursor.execute(query, (spectrumID,))
rows = result.fetchall()
if len(rows) < 1:
raise Exception('Measurement record not found')
x = array.array(arraytypecode)
frombytes(x, rows[0][0])
y = array.array(arraytypecode)
frombytes(y, rows[0][1])
return (list(x), list(y))
def get_signature(self, spectrumID):
'''Returns a spectrum with some additional metadata.
Usage::
sig = aster.get_signature(spectrumID)
Arguments:
`spectrumID` (int):
The **SpectrumID** value for the desired spectrum from the
**Spectra** table in the database.
Returns:
`sig` (:class:`~spectral.database.aster.Signature`):
An object with the following attributes:
============== ===== ========================================
Attribute Type Description
============== ===== ========================================
measurement_id int SpectrumID value from Spectra table
sample_name str **Sample** from the **Samples** table
sample_id int **SampleID** from the **Samples** table
x list list of band center wavelengths
y list list of spectrum values for each band
============== ===== ========================================
'''
import array
# Retrieve spectrum from Spectra table
query = '''SELECT Samples.Name, Samples.SampleID, XData, YData
FROM Samples, Spectra WHERE Samples.SampleID = Spectra.SampleID
AND Spectra.SpectrumID = ?'''
result = self.cursor.execute(query, (spectrumID,))
results = result.fetchall()
if len(results) < 1:
raise Exception('Measurement record not found')
sig = Signature()
sig.measurement_id = spectrumID
sig.sample_name = results[0][0]
sig.sample_id = results[0][1]
x = array.array(arraytypecode)
frombytes(x, results[0][2])
sig.x = list(x)
y = array.array(arraytypecode)
frombytes(y, results[0][3])
sig.y = list(y)
return sig
def create_envi_spectral_library(self, spectrumIDs, bandInfo):
'''Creates an ENVI-formatted spectral library for a list of spectra.
Arguments:
`spectrumIDs` (list of ints):
List of **SpectrumID** values for of spectra in the "Spectra"
table of the ASTER database.
`bandInfo` (:class:`~spectral.BandInfo`):
The spectral bands to which the original ASTER library spectra
will be resampled.
Returns:
A :class:`~spectral.io.envi.SpectralLibrary` object.
The IDs passed to the method should correspond to the SpectrumID field
of the ASTER database "Spectra" table. All specified spectra will be
resampled to the same discretization specified by the bandInfo
parameter. See :class:`spectral.BandResampler` for details on the
resampling method used.
'''
from spectral.algorithms.resampling import BandResampler
from spectral.io.envi import SpectralLibrary
import numpy
import unicodedata
spectra = numpy.empty((len(spectrumIDs), len(bandInfo.centers)))
names = []
for i in range(len(spectrumIDs)):
sig = self.get_signature(spectrumIDs[i])
resample = BandResampler(
sig.x, bandInfo.centers, None, bandInfo.bandwidths)
spectra[i] = resample(sig.y)
names.append(unicodedata.normalize('NFKD', sig.sample_name).
encode('ascii', 'ignore'))
header = {}
header['wavelength units'] = 'um'
header['spectra names'] = names
header['wavelength'] = bandInfo.centers
header['fwhm'] = bandInfo.bandwidths
return SpectralLibrary(spectra, header, {})
| gpl-2.0 | -1,652,097,644,647,048,200 | 33.866071 | 126 | 0.564277 | false |
pbugni/pheme.webAPIclient | pheme/webAPIclient/archive.py | 1 | 4121 | from datetime import datetime
import os
import requests
import json
from pheme.util.config import Config
def url_builder(predicate=None, resource=None, view=None, query_params={}):
"""Build webAPI url from config and passed values
:param predicate: desired action or type of document
:param resource: filename or object identifier
:param view: specialized view, such as metadata
:param query_params: dictionary of key, values to append
returns URL ready for request, post, etc.
"""
config = Config()
url = 'http://%s:%s' % (config.get("WebAPI", "host"),
config.get("WebAPI", "port"))
if predicate:
url = '/'.join((url, predicate))
if resource:
url = '/'.join((url, resource))
if view:
url = '/'.join((url, '@@' + view))
if query_params:
url = '?'.join((url,
'&'.join([k+'='+v for k, v in query_params.items()])))
return url
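# Illustrative result (assuming host "localhost" and port "6543" in the
# [WebAPI] section of the config file):
# url_builder(predicate='search', query_params={'limit': '1'})
# -> 'http://localhost:6543/search?limit=1'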
def document_store(document, document_type, compress_with=None,
allow_duplicate_filename=False, **metadata):
"""Client call to put document and meta data in PHEME archive
The PHEME archive exposes a Web API to PUT documents in the
document store (database), among other things. This function
wraps the HTTP request for easy client code use.
:param document: the document to persist, a path to the readable
file on the local filesystem.
:param document_type: type, such as 'essence', 'gipse', etc. See
pheme.webAPI.resources.Root for options.
:param compress_with: Can be 'gzip' or 'zip' (or None). Requests that
the document be compressed with the given method before it is stored.
:param allow_duplicate_filename: If set, duplicates will be
versioned. By default a duplicate raises an exception.
:param metadata: Any additional key, value strings to associate
with the document
returns the resulting document_id, a key which may be used to
retrieve the same document.
"""
url = url_builder(predicate=document_type,
resource=os.path.basename(document))
payload = dict()
if compress_with:
payload['compress_with'] = compress_with
if allow_duplicate_filename:
payload['allow_duplicate_filename'] = allow_duplicate_filename
if metadata:
# special handler for datetime types
datetime_handler = lambda x: x.isoformat()\
if isinstance(x, datetime)\
else None
payload['metadata'] = json.dumps(metadata, default=datetime_handler)
with open(document, 'rb') as content:
files = {os.path.basename(document): content}
r = requests.put(url, files=files, data=payload)
if r.status_code != 200: # pragma no cover
raise RuntimeError("Failed PUT (%d) for store document: "
"%s , see PHEME archive log" %
(r.status_code, url))
# Pull the doc id from the json reponse
response = json.loads("".join([i for i in r.iter_content()]))
return response['document_id']
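# Illustrative call (the file path and metadata key below are hypothetical):
# doc_id = document_store('/tmp/essence_report.xml', 'essence',
#                         compress_with='gzip', report_date=datetime.now())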
def document_delete(document_id):
"""Delete the requested document"""
r = requests.delete(url_builder(resource=document_id))
assert(r.status_code == 200)
def document_fetch_metadata(document_id):
"""Returns all metadata from the archived document if found"""
r = requests.get(url_builder(resource=document_id, view='metadata'))
return(json.loads(r.text))
def document_find(criteria, limit=0):
"""Search for best matching document(s) in archive
:param criteria: dictionary of key, values to search for
:param limit: optional restriction on number or matching docs;
zero implies no limit
returns a list of metadata if multiple matches are found.
returns the document text if there is only a single match or limit is set to 1.
"""
query_params = {'query': json.dumps(criteria), 'limit': str(limit)}
r = requests.get(url_builder(predicate='search',
query_params=query_params))
return json.loads(r.text)
| bsd-3-clause | 1,370,055,112,182,797,800 | 33.923729 | 78 | 0.645232 | false |
ActiveState/code | recipes/Python/436229_RecordJar_Parser/recipe-436229.py | 1 | 2025 | #!/usr/bin/env python
# recordjar.py - Parse a Record-Jar into a list of dictionaries.
# Copyright 2005 Lutz Horn <[email protected]>
# Licensed unter the same terms as Python.
def parse_jar(flo):
"""Parse a Record-Jar from a file like object into a list of dictionaries.
This method parses a file like object as described in "The Art of Unix
Programming" <http://www.faqs.org/docs/artu/ch05s02.html#id2906931>.
The records are divided by lines containing '%%'. Each record consists of
one or more lines, each containing a key, a colon, and a value. Whitespace
around both key and value are ignored.
>>> import StringIO
>>> flo = StringIO.StringIO("a:b\\nc:d\\n%%\\nx:y\\n")
>>> out = parse_jar(flo)
>>> print out
[{'a': 'b', 'c': 'd'}, {'x': 'y'}]
If a record contains a key more than once, the value for this key is a list
containing the values in their order of occurrence.
>>> flo = StringIO.StringIO("a:b\\nc:d\\n%%\\nx:y\\nx:z\\n")
>>> out = parse_jar(flo)
>>> print out
[{'a': 'b', 'c': 'd'}, {'x': ['y', 'z']}]
Leading or trailing separator lines ('%%') and lines containing only
whitespace are ignored.
>>> flo = StringIO.StringIO("%%\\na:b\\nc:d\\n%%\\n\\nx:y\\nx:z\\n")
>>> out = parse_jar(flo)
>>> print out
[{'a': 'b', 'c': 'd'}, {'x': ['y', 'z']}]
"""
records = []
for record in flo.read().split("%%"):
dict = {}
for line in [line for line in record.split("\n") if line.strip() != ""]:
key, value = line.split(":", 1)
key, value = key.strip(), value.strip()
try:
dict[key].append(value)
except AttributeError:
dict[key] = [dict[key], value]
except KeyError:
dict[key] = value
if len(dict) > 0:
records.append(dict)
return records
def _test():
import doctest, recordjar
return doctest.testmod(recordjar)
if __name__ == "__main__":
_test()
| mit | -8,003,185,806,157,289,000 | 32.75 | 80 | 0.565432 | false |
randomchars/pushbullet.py | tests/fixtures.py | 1 | 2011 | import time
devices_list_response = {
"devices": [
{
"active": True,
"iden": "1",
"created": time.time(),
"modified": time.time(),
"icon": "system",
"generated_nickname": False,
"nickname": "test dev",
"manufacturer": "test c",
"model": "test m",
"has_sms": False,
},
{
"active": False,
"iden": "2",
"created": time.time(),
"modified": time.time(),
"icon": "system",
"generated_nickname": False,
"nickname": "test dev",
"manufacturer": "test c",
"model": "test m",
"has_sms": False,
},
]
}
chats_list_response = {
"chats": [
{
"active": True,
"created": time.time(),
"modified": time.time(),
"with": {
"name": "test chat",
"status": "user",
"email": "[email protected]",
"email_normalized": "[email protected]",
},
},
{
"active": False,
"created": time.time(),
"modified": time.time(),
"with": {
"name": "test chat",
"status": "user",
"email": "[email protected]",
"email_normalized": "[email protected]",
},
},
]
}
channels_list_response = {
"channels": [
{
"iden": "test_iden",
"name": "test channel",
"created": time.time(),
"modified": time.time(),
"tag": "test_tag",
"active": True,
},
{
"iden": "test_iden2",
"name": "test channel",
"created": time.time(),
"modified": time.time(),
"tag": "test_tag",
"active": False,
},
]
}
| mit | -2,881,495,122,680,451,000 | 24.455696 | 62 | 0.380905 | false |
mbourqui/django-publications-bootstrap | publications_bootstrap/admin_views/import_bibtex.py | 1 | 7261 | # -*- coding: utf-8 -*-
import re
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django_countries import countries
from ..bibtex import parse
from ..models import Publication, Type
# mapping of months
MONTHS = {
'jan': 1, 'january': 1,
'feb': 2, 'february': 2,
'mar': 3, 'march': 3,
'apr': 4, 'april': 4,
'may': 5,
'jun': 6, 'june': 6,
'jul': 7, 'july': 7,
'aug': 8, 'august': 8,
'sep': 9, 'september': 9,
'oct': 10, 'october': 10,
'nov': 11, 'november': 11,
'dec': 12, 'december': 12}
COUNTRIES_BY_CODE = dict(countries)
# Reversed dict
try:
# Python 2.7.x
COUNTRIES_BY_NAME = {v: k for k, v in COUNTRIES_BY_CODE.iteritems()}
except:
# Python 3+
COUNTRIES_BY_NAME = {v: k for k, v in COUNTRIES_BY_CODE.items()}
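# e.g. COUNTRIES_BY_CODE['CH'] -> 'Switzerland' and COUNTRIES_BY_NAME['Switzerland'] -> 'CH'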
def import_bibtex(request):
if request.method == 'POST':
# try to parse BibTex
bib = parse(request.POST['bibliography'])
# container for error messages
errors = {}
# publication types
types = Type.objects.all()
# check for errors
if not bib:
if not request.POST['bibliography']:
errors['bibliography'] = 'This field is required.'
if not errors:
publications = []
# try adding publications
for entry in bib:
if 'title' in entry and 'author' in entry and 'year' in entry:
# parse authors
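# (e.g. "Doe, John and Roe, Jane" becomes "John Doe, Jane Roe", up to whitespace)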
authors = entry['author'].split(' and ')
for i in range(len(authors)):
author = authors[i].split(',')
author = [author[-1]] + author[:-1]
authors[i] = ' '.join(author)
authors = ', '.join(authors)
# add missing keys
keys = [
'journal',
'booktitle',
'address',
'publisher',
'editor',
'edition',
'institution',
'school',
'organization',
'series',
'url',
'doi',
'isbn',
'tags',
'note',
'abstract',
'month']
for key in keys:
if key not in entry:
entry[key] = ''
# map integer fields to integers
entry['month'] = Publication.EMonths.get(MONTHS.get(entry['month'].lower(), 0), None)
for field in ['volume', 'number', 'chapter', 'section']:
entry[field] = entry.get(field, None)
# remove whitespace characters (likely due to line breaks)
entry['url'] = re.sub(r'\s', '', entry['url'])
if 'country' not in entry:
entry['country'] = ''
else:
if entry['country'].strip() in COUNTRIES_BY_NAME:
entry['country'] = COUNTRIES_BY_NAME[entry['country'].strip()]
elif entry['country'].upper() in COUNTRIES_BY_CODE:
entry['country'] = entry['country'].upper()
else:
entry['country'] = ''
# determine type
type_id = None
for t in types:
if entry['type'] in t.bibtex_type_list:
type_id = t.id
break
if type_id is None:
errors['bibliography'] = 'Type "{}" unknown.'.format(entry['type'])
break
# add publication
publications.append(Publication(
type_id=type_id,
citekey=entry['key'],
title=entry['title'],
authors=authors,
year=entry['year'],
month=entry['month'],
journal=entry['journal'],
book_title=entry['booktitle'],
publisher=entry['publisher'],
location=entry['address'],
country=entry['country'],
editor=entry['editor'],
edition=entry['edition'],
institution=entry['institution'],
school=entry['school'],
organization=entry['organization'],
series=entry['series'],
volume=entry['volume'],
number=entry['number'],
chapter=entry['chapter'],
section=entry['section'],
note=entry['note'],
url=entry['url'],
doi=entry['doi'],
isbn=entry['isbn'],
external=False,
abstract=entry['abstract'],
tags=entry['tags'],
status=Publication.EStatuses.PUBLISHED))
else:
errors['bibliography'] = 'Make sure that the keys <title>, <author> and <year> are present.'
break
if not publications:
errors['bibliography'] = 'No valid BibTex entries found.'
if errors:
# some error occurred
return render(
request,
'admin/publications_bootstrap/import_bibtex.html', {
'errors': errors,
'title': 'Import BibTex',
'types': Type.objects.all(),
'request': request})
else:
try:
# save publications
for publication in publications:
publication.save()
except Exception:
msg = 'Some error occurred during saving of publications.'
else:
if len(publications) > 1:
msg = 'Successfully added {} publications.'.format(len(publications))
else:
msg = 'Successfully added {} publication.'.format(len(publications))
# show message
messages.info(request, msg)
# redirect to publication listing
return HttpResponseRedirect('../')
else:
return render(request, 'admin/publications_bootstrap/import_bibtex.html', {'title': 'Import BibTex',
'types': Type.objects.all(),
'request': request})
import_bibtex = staff_member_required(import_bibtex)
| mit | -2,223,934,309,947,411,700 | 36.235897 | 112 | 0.428729 | false |
hbeatty/dell-wsman-client-api-python | setup.py | 1 | 1520 | """
Setup file for egg builds
@copyright: 2010-2012
@author: Joseph Tallieu <[email protected]>
@author: Vijay Halaharvi <[email protected]>
@organization: Dell Inc. - PG Validation
@license: GNU LGLP v2.1
"""
# This file is part of WSManAPI.
#
# WSManAPI is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# WSManAPI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with WSManAPI. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
# setup meta data and entry points
setup(
name='wsman',
version="0.9.27",
description="Web Services Management",
author="Vijay Halaharvi, Joseph Tallieu",
author_email="[email protected], [email protected]",
license="Dell Software License",
packages=find_packages(),
package_data={'wsman':['transport/dummy/responses/winrm/*',
'transport/dummy/responses/wsmancli/*',
'loghandlers/templates/*']},
include_package_data=True
)
| lgpl-3.0 | -7,334,710,254,634,723,000 | 36.073171 | 80 | 0.694079 | false |
dchenaux/Yoda | yoda/flask_debugtoolbar_mongo/panel.py | 1 | 2746 | from flask_debugtoolbar.panels import DebugPanel
import jinja2
from . import operation_tracker
from . import jinja_filters
class MongoDebugPanel(DebugPanel):
"""Panel that shows information about MongoDB operations.
"""
name = 'Mongo'
has_content = True
def __init__(self, *args, **kwargs):
super(MongoDebugPanel, self).__init__(*args, **kwargs)
self.jinja_env.loader = jinja2.ChoiceLoader([
self.jinja_env.loader,
jinja2.PrefixLoader({
'debug_tb_mongo': jinja2.PackageLoader(__name__, 'templates')
})
])
filters = ('format_stack_trace', 'embolden_file', 'format_dict',
'highlight', 'pluralize')
for jfilter in filters:
self.jinja_env.filters[jfilter] = getattr(jinja_filters, jfilter)
operation_tracker.install_tracker()
def process_request(self, request):
operation_tracker.reset()
def nav_title(self):
return 'MongoDB'
def nav_subtitle(self):
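# Summarize one tracked operation type as (label, count, formatted total time)
# for the subtitle template.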
fun = lambda x, y: (x, len(y), '%.2f' % sum(z['time'] for z in y))
ctx = {'operations': [], 'count': 0, 'time': 0}
if operation_tracker.queries:
ctx['operations'].append(fun('read', operation_tracker.queries))
ctx['count'] += len(operation_tracker.queries)
ctx['time'] += sum(x['time'] for x in operation_tracker.queries)
if operation_tracker.inserts:
ctx['operations'].append(fun('insert', operation_tracker.inserts))
ctx['count'] += len(operation_tracker.inserts)
ctx['time'] += sum(x['time'] for x in operation_tracker.inserts)
if operation_tracker.updates:
ctx['operations'].append(fun('update', operation_tracker.updates))
ctx['count'] += len(operation_tracker.updates)
ctx['time'] += sum(x['time'] for x in operation_tracker.updates)
if operation_tracker.removes:
ctx['operations'].append(fun('delete', operation_tracker.removes))
ctx['count'] += len(operation_tracker.removes)
ctx['time'] += sum(x['time'] for x in operation_tracker.removes)
ctx['time'] = '%.2f' % ctx['time']
return self.render('debug_tb_mongo/mongo-panes-subtitle.html', ctx)
def title(self):
return 'MongoDB Operations'
def url(self):
return ''
def content(self):
context = self.context.copy()
context['queries'] = operation_tracker.queries
context['inserts'] = operation_tracker.inserts
context['updates'] = operation_tracker.updates
context['removes'] = operation_tracker.removes
return self.render('debug_tb_mongo/mongo-panel.html', context)
| bsd-3-clause | 3,575,773,966,745,471,000 | 36.616438 | 78 | 0.60488 | false |