# idegtiarov/gnocchi-rep :: gnocchi/ceilometer/utils.py (license: apache-2.0)
#
# Copyright 2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.v2_0 import client as ksclient
from oslo_config import cfg
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
def get_keystone_client():
return ksclient.Client(
username=cfg.CONF.service_credentials.os_username,
password=cfg.CONF.service_credentials.os_password,
tenant_id=cfg.CONF.service_credentials.os_tenant_id,
tenant_name=cfg.CONF.service_credentials.os_tenant_name,
cacert=cfg.CONF.service_credentials.os_cacert,
auth_url=cfg.CONF.service_credentials.os_auth_url,
region_name=cfg.CONF.service_credentials.os_region_name,
insecure=cfg.CONF.service_credentials.insecure)
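# Example usage (a sketch, not part of the original module): callers might do
#     keystone = get_keystone_client()
#     projects = keystone.tenants.list()
# to talk to Keystone with the Ceilometer service credentials configured above.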
# mshunshin/SegNetCMR :: pydicom/charset.py (license: mit)
# charset.py
"""Handle alternate character sets for character strings."""
#
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
#
from pydicom import compat
from pydicom.config import logger
from pydicom.valuerep import PersonNameUnicode, text_VRs
from pydicom.compat import in_py2
# Map DICOM Specific Character Set to python equivalent
python_encoding = {
'': 'iso8859', # default character set for DICOM
'ISO_IR 6': 'iso8859', # alias for latin_1 too
'ISO_IR 100': 'latin_1',
'ISO_IR 101': 'iso8859_2',
'ISO_IR 109': 'iso8859_3',
'ISO_IR 110': 'iso8859_4',
'ISO_IR 126': 'iso_ir_126', # Greek
'ISO_IR 127': 'iso_ir_127', # Arab
'ISO_IR 138': 'iso_ir_138', # Hebrew
'ISO_IR 144': 'iso_ir_144', # Russian
'ISO_IR 148': 'iso8859_5',
    # Thai 'ISO_IR 166': 'XXXXX', no idea what this maps to
'ISO 2022 IR 6': 'iso8859', # alias for latin_1 too
'ISO 2022 IR 13': 'shift_jis',
'ISO 2022 IR 87': 'iso2022_jp',
'ISO 2022 IR 100': 'latin_1',
'ISO 2022 IR 101': 'iso8859_2',
'ISO 2022 IR 109': 'iso8859_3',
'ISO 2022 IR 110': 'iso8859_4',
'ISO 2022 IR 126': 'iso_ir_126',
'ISO 2022 IR 127': 'iso_ir_127', # Arab
'ISO 2022 IR 138': 'iso_ir_138',
'ISO 2022 IR 144': 'iso_ir_144',
'ISO 2022 IR 148': 'iso8859_5',
'ISO 2022 IR 149': 'euc_kr', # needs cleanup via clean_escseq()
    # Japanese 'ISO 2022 IR 159': 'XXXX',
'ISO_IR 192': 'UTF8', # from Chinese example, 2008 PS3.5 Annex J p1-4
'GB18030': 'GB18030',
}
default_encoding = "iso8859"
def clean_escseq(element, encodings):
"""Remove escape sequences that Python does not remove from
Korean encoding ISO 2022 IR 149 due to the G1 code element.
"""
if 'euc_kr' in encodings:
return element.replace(
"\x1b\x24\x29\x43", "").replace("\x1b\x28\x42", "")
else:
return element
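# Note for clean_escseq above: the two sequences removed are ESC $ ) C (which
# designates KS X 1001 into G1) and ESC ( B (which designates ASCII into G0);
# Python's euc_kr codec decodes around them but leaves them in the text, hence
# the explicit strip.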
# DICOM PS3.5-2008 6.1.1 (p 18) says:
# default is ISO-IR 6 G0, equiv to common chr set of ISO 8859 (PS3.5 6.1.2.1)
# (0008,0005) value 1 can *replace* the default encoding...
# for VRs of SH, LO, ST, LT, PN and UT (PS3.5 6.1.2.3)...
# with a single-byte character encoding
# if (0008,0005) is multi-valued, then value 1 (or default if blank)...
# is used until code extension escape sequence is hit,
# which can be at start of string, or after CR/LF, FF, or
# in Person Name PN, after ^ or =
# NOTE also that 7.5.3 SEQUENCE INHERITANCE states that if (0008,0005)
# is not present in a sequence item then it is inherited from its parent.
def convert_encodings(encodings):
"""Converts DICOM encodings into corresponding python encodings"""
    # If a list is passed, we don't want to modify it in place, so copy it
encodings = encodings[:]
if isinstance(encodings, compat.string_types):
encodings = [encodings]
elif not encodings[0]:
encodings[0] = 'ISO_IR 6'
try:
encodings = [python_encoding[x] for x in encodings]
except KeyError: # Assume that it is already the python encoding (is there a way to check this?)
pass
if len(encodings) == 1:
encodings = [encodings[0]] * 3
elif len(encodings) == 2:
encodings.append(encodings[1])
return encodings
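# Example for convert_encodings above: a Specific Character Set value of
# ['ISO 2022 IR 100', 'ISO 2022 IR 126'] maps to ['latin_1', 'iso_ir_126',
# 'iso_ir_126']; the last entry is repeated so that three codecs are always
# available (one per Person Name component group).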
def decode(data_element, dicom_character_set):
"""Apply the DICOM character encoding to the data element
data_element -- DataElement instance containing a value to convert
dicom_character_set -- the value of Specific Character Set (0008,0005),
which may be a single value,
a multiple value (code extension), or
may also be '' or None.
If blank or None, ISO_IR 6 is used.
"""
if not dicom_character_set:
dicom_character_set = ['ISO_IR 6']
encodings = convert_encodings(dicom_character_set)
# decode the string value to unicode
    # PN is a special case as it may have 3 components with different character sets
if data_element.VR == "PN":
# logger.warn("%s ... type: %s" %(str(data_element), type(data_element.VR)))
if not in_py2:
if data_element.VM == 1:
data_element.value = data_element.value.decode(encodings)
else:
data_element.value = [val.decode(encodings) for val in data_element.value]
else:
if data_element.VM == 1:
data_element.value = PersonNameUnicode(data_element.value, encodings)
else:
data_element.value = [PersonNameUnicode(value, encodings)
for value in data_element.value]
if data_element.VR in text_VRs:
# Remove the first encoding if this is a multi-byte encoding
if len(encodings) > 1:
del encodings[0]
# You can't re-decode unicode (string literals in py3)
if data_element.VM == 1:
if isinstance(data_element.value, compat.text_type):
return
data_element.value = clean_escseq(
data_element.value.decode(encodings[0]), encodings)
else:
output = list()
for value in data_element.value:
if isinstance(value, compat.text_type):
output.append(value)
else:
output.append(clean_escseq(value.decode(encodings[0]), encodings))
data_element.value = output
# Krigu/python_fun :: Heidi/FileParser.py (license: gpl-3.0)
TORCH_START_VALUE = 15
THING_START_VALUE = 20
INVISIBLE_START_VALUE = 21
FANTASTIC_START_VALUE = 3
class Story:
heros = ["Heidi", "Fantastic", "Tourch", "Thing", "Invisible"]
heidi = 0
fantastic = FANTASTIC_START_VALUE
torch = TORCH_START_VALUE
thing = THING_START_VALUE
invisible = INVISIBLE_START_VALUE
def act1_scene1(self):
self.fantastic = 1
self.invisible = INVISIBLE_START_VALUE
if self.fantastic == self.invisible:
self.act1_scene2()
else:
self.torch = 4
print(self.fantastic)
self.act1_scene2()
def act1_scene2(self):
self.thing = THING_START_VALUE
self.fantastic = 2
self.act1_scene3()
def act1_scene3(self):
if self.thing <= 1:
self.act1_scene4()
else:
self.fantastic = 4
self.thing -= 1
self.act1_scene3()
def act1_scene4(self):
self.invisible += self.fantastic / 2
self.torch -= 1
if self.thing <= self.torch:
self.act1_scene2()
else:
print(self.invisible)
self.act1_scene3()
def act2_scene1(self):
self.torch = 0
print(self.torch)
self.torch = TORCH_START_VALUE
self.act2_scene2()
def act2_scene2(self):
if self.torch % 2 == 1:
print(self.fantastic)
else:
self.thing = self.torch / 2
self.fantastic += 1
self.torch = self.thing
if self.fantastic <= 32:
self.act2_scene2()
Story().act1_scene1()
# mahabs/nitro :: nssrc/com/citrix/netscaler/nitro/resource/config/utility/techsupport_args.py (license: apache-2.0)
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class techsupport_args :
""" Provides additional arguments required for fetching the techsupport resource.
"""
def __init__(self) :
self._scope = ""
@property
def scope(self) :
"""Use this option to run showtechsupport on present node or all cluster nodes.<br/>Default value: NODE<br/>Possible values = NODE, CLUSTER.
"""
try :
return self._scope
except Exception as e:
raise e
@scope.setter
def scope(self, scope) :
"""Use this option to run showtechsupport on present node or all cluster nodes.<br/>Default value: NODE<br/>Possible values = NODE, CLUSTER
"""
try :
self._scope = scope
except Exception as e:
raise e
class Scope:
NODE = "NODE"
CLUSTER = "CLUSTER"
#!/usr/bin/python
# cheapjack/MemoryCraft :: MemoryCloud1.py (license: mit)
#Install the modules we need
#from pyfirmata import Arduino, util, INPUT
from mcpi import minecraft
from mcpi import minecraftstuff
from time import sleep
import server
import serial
# Set up a connection to the Arduino/Shrimp if we need it
#PORT = "/dev/tty.SLAB_USBtoUART"
#ser = serial.Serial(PORT, 9600)
# Connect to the server: we use the imported server.py to make it work with CloudMaker
mc = minecraft.Minecraft.create(server.address)
#Post a message to the minecraft chat window
mc.postToChat("Ready to read Memory!")
# Use the command /getpos or F3 in Minecraft client to find out where you are then use those
# x, y, z coordinates to build things
# translate CloudMaker coords for mcpi ones
# add this to x
mcx = 177
# - this from y
mcy = 64
# - this from z
mcz = 135
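# Worked example of the translation above: a CloudMaker position of (-343, 75, -15)
# becomes ((-343 + 177), (75 - 64), (-15 - 135)) = (-166, 11, -150) in mcpi
# coordinates when fed through MemoryCloud1 below.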
# Text Bubble 1
def MemoryCloud1(startx,starty,startz, chartwidth, chartheight, chartdepth, blocktype, blockid):
# Main Bubble
mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + chartwidth, (starty-mcy) + chartheight, (startz - mcz) + chartdepth, blocktype, blockid)
# inset bottom
mc.setBlocks((startx + mcx) + 1, (starty-mcy) - 1, (startz-mcz), (startx + mcx) + (chartwidth-1), (starty-mcy) -1, (startz - mcz) + chartdepth, blocktype, blockid)
#inset top
mc.setBlocks((startx + mcx) + 1, (starty-mcy) + (chartheight + 1), (startz-mcz), (startx + mcx) + (chartwidth-1), (starty-mcy) + (chartheight + 1), (startz - mcz) + chartdepth, blocktype, blockid)
# If you want to add a bubble diagram, insert your coordinates
# Then use /js blocktype("My Message", blockid) while facing the block where you want to write
#MemoryCloud1(-343, 75, -15, 44, 14, 2, 35, 0)
#MemoryCloud1(-343, 110, -15, 44, 14, 2, 35, 0)
#MemoryCloud1(-343, 75, -15, 44, 14, 2, 0)
#MemoryCloud1(-343, 100, -15, 44, 14, 2, 0)
# the memory cloud function is (myposx, myposy, myposz, width, height, thickness,
# blocktype, blockidoption)
MemoryCloud1(332, 100, -1185, 44, 4, 2, 35, 0)
#
# Bobox214/ZemkaBot :: tools/kbHit.py (license: mit)
import sys
import termios
import atexit
from select import select
class KBHit(object):
def __init__(self):
'''Creates a KBHit object that you can call to do various keyboard things.
'''
# Save the terminal settings
self.fd = sys.stdin.fileno()
self.new_term = termios.tcgetattr(self.fd)
self.old_term = termios.tcgetattr(self.fd)
# New terminal setting unbuffered
self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)
# Support normal-terminal reset at exit
atexit.register(self.set_normal_term)
def set_normal_term(self):
''' Resets to normal terminal. On Windows this is a no-op.
'''
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)
def getch(self):
''' Returns a keyboard character after kbhit() has been called.
Should not be called in the same program as getarrow().
'''
return sys.stdin.read(1)
def getarrow(self):
''' Returns an arrow-key code after kbhit() has been called. Codes are
0 : up
1 : right
2 : down
3 : left
Should not be called in the same program as getch().
'''
c = sys.stdin.read(3)[2]
        vals = [65, 67, 66, 68]  # final bytes of ESC [ A/C/B/D, i.e. up/right/down/left
return vals.index(ord(c.decode('utf-8')))
def kbhit(self):
''' Returns True if keyboard character was hit, False otherwise.
'''
dr,dw,de = select([sys.stdin], [], [], 0)
return dr != []
# Test
if __name__ == "__main__":
kb = KBHit()
print('Hit any key, or ESC to exit')
while True:
if kb.kbhit():
c = kb.getch()
if ord(c) == 27: # ESC
break
print(c)
kb.set_normal_term()
#!/usr/bin/env python
# clay584/IOS-to-HTML :: iostohtml.py (license: gpl-2.0)
from ciscoconfparse import CiscoConfParse
import re
def read_in_file(filename):
return CiscoConfParse(filename)
def find_acls(parsed_config):
acl_names = []
# Get standard ACL numbers/names
standard_acls = parsed_config.find_objects('^access-list')
for acl in standard_acls:
try:
acl_names.append(re.search(r'^access-list (\S+)',
acl.text).group(1))
except:
pass
# Get extended ACL names
acls = parsed_config.find_objects('^ip access-list')
for acl in acls:
try:
acl_names.append(re.search(r'^ip access-list .+ (\S+)$',
acl.text).group(1))
except:
pass
unique_acls = set(acl_names)
return unique_acls
def find_class_maps(parsed_config):
# Get class-map names
cmaps = []
class_map_ojbs = parsed_config.find_objects('^class-map')
for cmap in class_map_ojbs:
try:
cmaps.append(re.search(r'^class-map.+ (\S+)$',
cmap.text).group(1))
except:
pass
return set(cmaps)
def find_policy_maps(parsed_config):
# Get policy-map names
pmaps = []
pmap_objs = parsed_config.find_objects('^policy-map')
for pmap in pmap_objs:
try:
pmaps.append(re.search(r'^policy-map .+ (\S+)$',
pmap.text).group(1))
except:
pass
return set(pmaps)
def find_route_maps(parsed_config):
# Get route-map names
rmaps = []
rmap_objs = parsed_config.find_objects('^route-map')
for rmap in rmap_objs:
try:
rmaps.append(re.search(r'^route-map (\S+).*$',
rmap.text).group(1))
except:
pass
return set(rmaps)
def find_interfaces(parsed_config):
# Get interface names
intfs = []
intf_objs = parsed_config.find_objects('^interface')
for intf in intf_objs:
try:
intfs.append(re.search(r'^interface (\S+)$',
intf.text).group(1))
except:
pass
return set(intfs)
def find_pointers_to_acls(parsed_config, acls):
    valid_pointers = ['ip access-group']
    real_pointers = []
    for acl in acls:
        # find each line where there is a valid pointer.
        # Capture the pointer line text, pointer name, and the pointee name.
        pass  # not implemented yet; a possible approach is sketched below
    return real_pointers
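# A minimal sketch (assumption: a "pointer" is a config line such as
# "ip access-group <name> in" that references one of the discovered ACLs by
# name); the helper name below is hypothetical and not part of the original.
def find_acl_pointer_lines(parsed_config, acls):
    pointer_lines = []
    for acl in acls:
        # Escape the ACL name so regex metacharacters are matched literally
        pattern = r'ip access-group %s( |$)' % re.escape(acl)
        for obj in parsed_config.find_objects(pattern):
            pointer_lines.append((obj.text.strip(), acl))
    return pointer_lines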
def main():
filename = 'startup-config.txt'
parsed_config = read_in_file(filename)
acls = find_acls(parsed_config)
for acl in acls:
print acl
print '--------------------'
cmaps = find_class_maps(parsed_config)
for cmap in cmaps:
print cmap
print '--------------------'
pmaps = find_policy_maps(parsed_config)
for pmap in pmaps:
print pmap
print '--------------------'
rmaps = find_route_maps(parsed_config)
for rmap in rmaps:
print rmap
print '--------------------'
intfs = find_interfaces(parsed_config)
for intf in intfs:
print intf
if __name__ == '__main__':
main()
# axce1/PyProjects :: Graphics/watermark.py (license: gpl-2.0)
import Image, ImageEnhance
def add_watermark(image, watermark, opacity=1, wm_interval=None):
assert opacity >= 0 and opacity <= 1
if opacity < 1:
if watermark.mode != 'RGBA':
watermark = watermark.convert('RGBA')
else:
watermark = watermark.copy()
alpha = watermark.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
watermark.putalpha(alpha)
layer = Image.new('RGBA', image.size, (0,0,0,0))
if wm_interval:
for y in range(0, image.size[1], watermark.size[1]+wm_interval):
for x in range(0, image.size[0], watermark.size[0]+wm_interval):
layer.paste(watermark, (x, y))
else:
layer.paste(watermark, (0,image.size[0]))
return Image.composite(layer, image, layer)
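# Example usage (a sketch with hypothetical file names): tile the mark every 50 px
#   img = Image.open('photo.jpg')
#   mark = Image.open('logo.png')
#   add_watermark(img, mark, opacity=0.4, wm_interval=50).save('photo_tiled.png')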
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print ('Usage: python watermark.py image-file watermark-image-file')
sys.exit(1)
img = Image.open(sys.argv[1])
wm = Image.open(sys.argv[2])
add_watermark(img, wm, 0.4, 100).save("image_wm.png")
#!/usr/bin/python
# codingenesis/ansible_mysql_rds_playbook :: hack/rds.py (license: gpl-3.0)
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take. The 'reboot' option is available starting at version 2.0
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Used only when command=create or command=replicate.
      - Prior to 2.0 it always defaulted to null and the API used 3306; it had to be set to the engine's own default port when not using MySQL.
        Starting at 2.0 it automatically defaults to what is expected for each C(db_engine).
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
version_added: "1.9"
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
- debug: msg="The new db endpoint is {{ rds.instance.endpoint }}"
'''
import sys
import time
try:
import boto.rds
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto.rds2
has_rds2 = True
except ImportError:
has_rds2 = False
DEFAULT_PORTS= {
'mysql': 3306,
'oracle': 1521,
'sqlserver': 1433,
'postgres': 5432,
}
class RDSException(Exception):
def __init__(self, exc):
if hasattr(exc, 'error_message') and exc.error_message:
self.message = exc.error_message
self.code = exc.error_code
elif hasattr(exc, 'body') and 'Error' in exc.body:
self.message = exc.body['Error']['Message']
self.code = exc.body['Error']['Code']
else:
self.message = str(exc)
self.code = 'Unknown Error'
class RDSConnection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
except boto.exception.BotoServerError, e:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
except boto.exception.BotoServerError, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
params['engine'] = db_engine
try:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.createdb_instance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_dbinstance(instance_name)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
class RDS2Connection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound, e:
return None
except Exception, e:
raise e
def get_db_snapshot(self, snapshotid):
try:
snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
try:
result = self.connection.create_db_instance(instance_name, size, instance_class,
db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
class RDSDBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
self.name = dbinstance.id
self.status = dbinstance.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.instance.create_time,
'status' : self.status,
'availability_zone' : self.instance.availability_zone,
'backup_retention' : self.instance.backup_retention_period,
'backup_window' : self.instance.preferred_backup_window,
'maintenance_window' : self.instance.preferred_maintenance_window,
'multi_zone' : self.instance.multi_az,
'instance_type' : self.instance.instance_class,
'username' : self.instance.master_username,
'iops' : self.instance.iops
}
# Endpoint exists only if the instance is available
if self.status == 'available':
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
except Exception, e:
d["replication_source"] = None
return d
class RDS2DBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
if 'DBInstanceIdentifier' not in dbinstance:
self.name = None
else:
self.name = self.instance.get('DBInstanceIdentifier')
self.status = self.instance.get('DBInstanceStatus')
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance['InstanceCreateTime'],
'status': self.status,
'availability_zone': self.instance['AvailabilityZone'],
'backup_retention': self.instance['BackupRetentionPeriod'],
'maintenance_window': self.instance['PreferredMaintenanceWindow'],
'multi_zone': self.instance['MultiAZ'],
'instance_type': self.instance['DBInstanceClass'],
'username': self.instance['MasterUsername'],
'iops': self.instance['Iops'],
'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
}
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
if self.status == 'available':
d['endpoint'] = self.instance["Endpoint"]["Address"]
d['port'] = self.instance["Endpoint"]["Port"]
else:
d['endpoint'] = None
d['port'] = None
return d
class RDSSnapshot:
def __init__(self, snapshot):
self.snapshot = snapshot
self.name = snapshot.id
self.status = snapshot.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot.snapshot_create_time,
'status' : self.status,
'availability_zone' : self.snapshot.availability_zone,
'instance_id' : self.snapshot.instance_id,
'instance_created' : self.snapshot.instance_create_time,
}
# needs boto >= 2.21.0
if hasattr(self.snapshot, 'snapshot_type'):
d["snapshot_type"] = self.snapshot.snapshot_type
if hasattr(self.snapshot, 'iops'):
d["iops"] = self.snapshot.iops
return d
class RDS2Snapshot:
def __init__(self, snapshot):
if 'DeleteDBSnapshotResponse' in snapshot:
self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
else:
self.snapshot = snapshot
self.name = self.snapshot.get('DBSnapshotIdentifier')
self.status = self.snapshot.get('Status')
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot['SnapshotCreateTime'],
'status' : self.status,
'availability_zone' : self.snapshot['AvailabilityZone'],
'instance_id' : self.snapshot['DBInstanceIdentifier'],
'instance_created' : self.snapshot['InstanceCreateTime'],
'snapshot_type' : self.snapshot['SnapshotType'],
'iops' : self.snapshot['Iops'],
}
return d
def await_resource(conn, resource, status, module):
wait_timeout = module.params.get('wait_timeout') + time.time()
while wait_timeout > time.time() and resource.status != status:
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
if module.params.get('command') == 'snapshot':
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
resource = conn.get_db_snapshot(resource.name)
else:
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
resource = conn.get_db_instance(resource.name)
if resource is None:
break
return resource
def create_db_instance(module, conn):
subnet = module.params.get('subnet')
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group','port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if has_rds2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
except RDSException, e:
module.fail_json(msg="Failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if has_rds2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
except RDSException, e:
module.fail_json(msg="Failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
if has_rds2:
params["final_db_snapshot_identifier"] = snapshot
else:
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
except RDSException, e:
module.fail_json(msg="Failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
resource = await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
except RDSException, e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
except Exception, e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade2', 'upgrade' ]
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
except RDSException, e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)
if result.get_data().get('replication_source'):
try:
result = conn.promote_read_replica(instance_name, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
else:
changed = False
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_snapshot(snapshot)
if not result:
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_snapshot(snapshot)
module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = []
if has_rds2:
valid_vars.append('force_failover')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
changed = False
try:
result = conn.reboot_db_instance(instance_name, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
'option_group', 'port', 'publicly_accessible',
'subnet', 'tags', 'upgrade', 'zone']
if has_rds2:
valid_vars.append('instance_type')
else:
required_vars.append('instance_type')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
instance_type = module.params.get('instance_type')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_instance(instance_name)
if not result:
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
command = module.params.get('command')
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg="Parameter %s required for %s command" % (v, command))
# map to convert rds module options to boto rds and rds2 options
optional_params = {
'port': 'port',
'db_name': 'db_name',
'zone': 'availability_zone',
'maint_window': 'preferred_maintenance_window',
'backup_window': 'preferred_backup_window',
'backup_retention': 'backup_retention_period',
'multi_zone': 'multi_az',
'engine_version': 'engine_version',
'upgrade': 'auto_minor_version_upgrade',
'upgrade2': 'allow_major_version_upgrade',
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
'size': 'allocated_storage',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
}
# map to convert rds module options to boto rds options
optional_params_rds = {
'db_engine': 'engine',
'password': 'master_password',
'parameter_group': 'param_group',
'instance_type': 'instance_class',
}
# map to convert rds module options to boto rds2 options
optional_params_rds2 = {
'tags': 'tags',
'publicly_accessible': 'publicly_accessible',
'parameter_group': 'db_parameter_group_name',
'character_set_name': 'character_set_name',
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
'force_failover': 'force_failover',
}
if has_rds2:
optional_params.update(optional_params_rds2)
sec_group = 'db_security_groups'
else:
optional_params.update(optional_params_rds)
sec_group = 'security_groups'
# Check for options only supported with rds2
for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
if module.params.get(k):
            module.fail_json(msg="Parameter %s requires boto.rds2 (boto >= 2.26.0)" % k)
params = {}
for (k, v) in optional_params.items():
if module.params.get(k) and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if module.params.get('security_groups'):
params[sec_group] = module.params.get('security_groups').split(',')
vpc_groups = module.params.get('vpc_security_groups')
if vpc_groups:
if has_rds2:
params['vpc_security_group_ids'] = vpc_groups
else:
groups_list = []
for x in vpc_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params['vpc_security_groups'] = groups_list
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
params['tags'] = module.params['tags'].items()
return params
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name = dict(required=False),
source_instance = dict(required=False),
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
size = dict(required=False),
instance_type = dict(aliases=['type'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
db_name = dict(required=False),
engine_version = dict(required=False),
parameter_group = dict(required=False),
license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone = dict(type='bool', default=False),
iops = dict(required=False),
security_groups = dict(required=False),
vpc_security_groups = dict(type='list', required=False),
port = dict(required=False),
upgrade = dict(type='bool', default=False),
upgrade2 = dict(type='bool', default=False),
option_group = dict(required=False),
maint_window = dict(required=False),
backup_window = dict(required=False),
backup_retention = dict(required=False),
zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet = dict(required=False),
wait = dict(type='bool', default=False),
wait_timeout = dict(type='int', default=300),
snapshot = dict(required=False),
apply_immediately = dict(type='bool', default=False),
new_instance_name = dict(required=False),
tags = dict(type='dict', required=False),
publicly_accessible = dict(required=False),
character_set_name = dict(required=False),
force_failover = dict(type='bool', required=False, default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
invocations = {
'create': create_db_instance,
'replicate': replicate_db_instance,
'delete': delete_db_instance_or_snapshot,
'facts': facts_db_instance_or_snapshot,
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'reboot': reboot_db_instance,
'restore': restore_db_instance,
}
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.")
# set port to per db defaults if not specified
if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
if '-' in module.params['db_engine']:
engine = module.params['db_engine'].split('-')[0]
else:
engine = module.params['db_engine']
module.params['port'] = DEFAULT_PORTS[engine.lower()]
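        # Example (sketch): db_engine 'oracle-se1' splits on '-' to 'oracle', which
        # maps to DEFAULT_PORTS['oracle'] == 1521; a plain 'MySQL' lowercases to
        # 'mysql' and gets 3306.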
# connect to the rds endpoint
if has_rds2:
conn = RDS2Connection(module, region, **aws_connect_params)
else:
conn = RDSConnection(module, region, **aws_connect_params)
invocations[module.params.get('command')](module, conn)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
# jnimmo/pyenvisalink :: pyenvisalink/honeywell_client.py
import logging
import json
import re
import asyncio
from pyenvisalink import EnvisalinkClient
from pyenvisalink.honeywell_envisalinkdefs import *
_LOGGER = logging.getLogger(__name__)
class HoneywellClient(EnvisalinkClient):
"""Represents a honeywell alarm client."""
@asyncio.coroutine
    def keep_alive(self):
        """Send a keepalive command to reset its watchdog timer."""
while not self._shutdown:
if self._loggedin:
self.send_command(evl_Commands['KeepAlive'], '')
yield from asyncio.sleep(self._alarmPanel.keepalive_interval, loop=self._eventLoop)
@asyncio.coroutine
def periodic_zone_timer_dump(self):
"""Used to periodically get the zone timers to make sure our zones are updated."""
while not self._shutdown:
if self._loggedin:
self.dump_zone_timers()
yield from asyncio.sleep(self._alarmPanel.zone_timer_interval, loop=self._eventLoop)
def send_command(self, code, data):
"""Send a command in the proper honeywell format."""
to_send = '^' + code + ',' + data + '$'
self.send_data(to_send)
def dump_zone_timers(self):
"""Send a command to dump out the zone timers."""
self.send_command(evl_Commands['DumpZoneTimers'], '')
def keypresses_to_partition(self, partitionNumber, keypresses):
"""Send keypresses to a particular partition."""
for char in keypresses:
self.send_command(evl_Commands['PartitionKeypress'], str.format("{0},{1}", partitionNumber, char))
def arm_stay_partition(self, code, partitionNumber):
"""Public method to arm/stay a partition."""
self.keypresses_to_partition(partitionNumber, code + '3')
def arm_away_partition(self, code, partitionNumber):
"""Public method to arm/away a partition."""
self.keypresses_to_partition(partitionNumber, code + '2')
def arm_max_partition(self, code, partitionNumber):
"""Public method to arm/max a partition."""
self.keypresses_to_partition(partitionNumber, code + '4')
def disarm_partition(self, code, partitionNumber):
"""Public method to disarm a partition."""
self.keypresses_to_partition(partitionNumber, code + '1')
def panic_alarm(self, panicType):
"""Public method to raise a panic alarm."""
self.keypresses_to_partition(1, evl_PanicTypes[panicType])
def parseHandler(self, rawInput):
"""When the envisalink contacts us- parse out which command and data."""
cmd = {}
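        # Example (hypothetical raw line): '%00,01,1C08,08,00,****DISARMED****$'
        # parses so that cmd['code'] becomes '%00' and cmd['data'] becomes
        # '01,1C08,08,00,****DISARMED****'; handler/callback names are then looked
        # up from evl_ResponseTypes below.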
parse = re.match('([%\^].+)\$', rawInput)
if parse and parse.group(1):
# keep first sentinel char to tell difference between tpi and
# Envisalink command responses. Drop the trailing $ sentinel.
inputList = parse.group(1).split(',')
code = inputList[0]
cmd['code'] = code
cmd['data'] = ','.join(inputList[1:])
elif not self._loggedin:
# assume it is login info
code = rawInput
cmd['code'] = code
cmd['data'] = ''
else:
            _LOGGER.error("Unrecognized data received from the envisalink. Ignoring.")
_LOGGER.debug(str.format("Code:{0} Data:{1}", code, cmd['data']))
try:
cmd['handler'] = "handle_%s" % evl_ResponseTypes[code]['handler']
cmd['callback'] = "callback_%s" % evl_ResponseTypes[code]['handler']
except KeyError:
_LOGGER.warning(str.format('No handler defined in config for {0}, skipping...', code))
return cmd
def handle_login(self, code, data):
"""When the envisalink asks us for our password- send it."""
self.send_data(self._alarmPanel.password)
def handle_command_response(self, code, data):
"""Handle the envisalink's initial response to our commands."""
responseString = evl_TPI_Response_Codes[data]
_LOGGER.debug("Envisalink response: " + responseString)
if data != '00':
logging.error("error sending command to envisalink. Response was: " + responseString)
def handle_poll_response(self, code, data):
"""Handle the response to our keepalive messages."""
self.handle_command_response(code, data)
def handle_keypad_update(self, code, data):
"""Handle the response to when the envisalink sends keypad updates our way."""
dataList = data.split(',')
        # make sure data is in format we expect, current TPI seems to send bad data every so often
#TODO: Make this a regex...
if len(dataList) != 5 or "%" in data:
_LOGGER.error("Data format invalid from Envisalink, ignoring...")
return
partitionNumber = int(dataList[0])
flags = IconLED_Flags()
flags.asShort = int(dataList[1], 16)
beep = evl_Virtual_Keypad_How_To_Beep.get(dataList[3], 'unknown')
alpha = dataList[4]
_LOGGER.debug("Updating our local alarm state...")
self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update({'alarm': bool(flags.alarm), 'alarm_in_memory': bool(flags.alarm_in_memory), 'armed_away': bool(flags.armed_away),
'ac_present': bool(flags.ac_present), 'armed_bypass': bool(flags.bypass), 'chime': bool(flags.chime),
'armed_zero_entry_delay': bool(flags.armed_zero_entry_delay), 'alarm_fire_zone': bool(flags.alarm_fire_zone),
'trouble': bool(flags.system_trouble), 'ready': bool(flags.ready), 'fire': bool(flags.fire),
'armed_stay': bool(flags.armed_stay),
'alpha': alpha,
'beep': beep,
})
_LOGGER.debug(json.dumps(self._alarmPanel.alarm_state['partition'][partitionNumber]['status']))
def handle_zone_state_change(self, code, data):
"""Handle when the envisalink sends us a zone change."""
# Envisalink TPI is inconsistent at generating these
bigEndianHexString = ''
# every four characters
inputItems = re.findall('....', data)
for inputItem in inputItems:
# Swap the couples of every four bytes
# (little endian to big endian)
swapedBytes = []
swapedBytes.insert(0, inputItem[0:2])
swapedBytes.insert(0, inputItem[2:4])
# add swapped set of four bytes to our return items,
# converting from hex to int
bigEndianHexString += ''.join(swapedBytes)
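        # Illustrative example (hypothetical data): '450F0200' splits into
        # ['450F', '0200']; swapping the byte pair in each item gives '0F45' and
        # '0002', so bigEndianHexString becomes '0F450002'.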
# convert hex string to 64 bit bitstring TODO: THIS IS 128 for evl4
if self._alarmPanel.envisalink_version < 4:
bitfieldString = str(bin(int(bigEndianHexString, 16))[2:].zfill(64))
else:
bitfieldString = str(bin(int(bigEndianHexString, 16))[2:].zfill(128))
# reverse every 16 bits so "lowest" zone is on the left
zonefieldString = ''
inputItems = re.findall('.' * 16, bitfieldString)
for inputItem in inputItems:
zonefieldString += inputItem[::-1]
for zoneNumber, zoneBit in enumerate(zonefieldString, start=1):
self._alarmPanel.alarm_state['zone'][zoneNumber]['status'].update({'open': zoneBit == '1', 'fault': zoneBit == '1'})
if zoneBit == '1':
self._alarmPanel.alarm_state['zone'][zoneNumber]['last_fault'] = 0
_LOGGER.debug("(zone %i) is %s", zoneNumber, "Open/Faulted" if zoneBit == '1' else "Closed/Not Faulted")
def handle_partition_state_change(self, code, data):
"""Handle when the envisalink sends us a partition change."""
for currentIndex in range(0, 8):
partitionStateCode = data[currentIndex * 2:(currentIndex * 2) + 2]
partitionState = evl_Partition_Status_Codes[str(partitionStateCode)]
partitionNumber = currentIndex + 1
previouslyArmed = self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].get('armed', False)
armed = partitionState['name'] in ('ARMED_STAY', 'ARMED_AWAY', 'ARMED_MAX')
self._alarmPanel.alarm_state.update({'arm': not armed, 'disarm': armed, 'cancel': bool(partitionState['name'] == 'EXIT_ENTRY_DELAY')})
self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update({'exit_delay': bool(partitionState['name'] == 'EXIT_ENTRY_DELAY' and not previouslyArmed),
'entry_delay': bool(partitionState['name'] == 'EXIT_ENTRY_DELAY' and previouslyArmed),
'armed': armed,
'ready': bool(partitionState['name'] == 'READY' or partitionState['name'] == 'READY_BYPASS')})
if partitionState['name'] == 'NOT_READY': self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update({'ready': False})
            _LOGGER.debug('Partition ' + str(partitionNumber) + ' is in state ' + partitionState['name'])
_LOGGER.debug(json.dumps(self._alarmPanel.alarm_state['partition'][partitionNumber]['status']))
def handle_realtime_cid_event(self, code, data):
"""Handle when the envisalink sends us an alarm arm/disarm/trigger."""
eventTypeInt = int(data[0])
eventType = evl_CID_Qualifiers[eventTypeInt]
cidEventInt = int(data[1:4])
cidEvent = evl_CID_Events[cidEventInt]
partition = data[4:6]
zoneOrUser = int(data[6:9])
_LOGGER.debug('Event Type is ' + eventType)
_LOGGER.debug('CID Type is ' + cidEvent['type'])
_LOGGER.debug('CID Description is ' + cidEvent['label'])
_LOGGER.debug('Partition is ' + partition)
_LOGGER.debug(cidEvent['type'] + ' value is ' + str(zoneOrUser))
return cidEvent
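# Usage sketch (illustrative only; the constructor arguments shown are assumptions,
# since the real wiring is done by the surrounding EnvisalinkAlarmPanel / EnvisalinkClient code):
#   client = HoneywellClient(alarm_panel, event_loop)  # hypothetical args
#   client.arm_stay_partition('1234', 1)   # arm partition 1 in stay mode with user code 1234
#   client.disarm_partition('1234', 1)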
| mit | -3,805,397,457,246,989,300 | 50.447761 | 198 | 0.582922 | false |
gregorymfoster/honeypot | honeypot/honeypot/www/models.py | 1 | 9081 | import random
import string
import logging
import datetime
from dateutil.relativedelta import relativedelta
# An abstracted data model for fetching data from honeypot_logs
class HoneypotLogTable(object):
# Create model for a specific airflow database
def __init__(self, table_name, sql_conn_id='airflow_db'):
self.table_name = table_name
self.sql_conn_id = sql_conn_id
# Return data on a specific dag or task
def get_rows_for_data_request(self, measure, dag, name):
args = {'from': 'FROM ' + self.table_name}
# Infer the grain depending if a dag is specified
grain = 'dag'
if dag and dag != name:
grain = 'task'
args['where'] = "WHERE {grain}_id = '{name}'".format(**locals())
if measure == 'io':
args['select'] = '''SELECT
(SUM(hdfs_reads) + SUM(hdfs_writes)) AS value,
input_date AS ds'''
elif measure == 'cpu':
args['select'] = '''SELECT
SUM(cpu_time) AS value,
input_date AS ds'''
elif measure == 'mappers':
args['select'] = '''SELECT
SUM(num_mappers) AS value,
input_date AS ds'''
elif measure == 'reducers':
args['select'] = '''SELECT
SUM(num_reducers) AS value,
input_date AS ds'''
args['order'] = 'ORDER BY input_date'
args['group'] = 'GROUP BY input_date'
query = args['select'] + '\n'
query += args['from'] + '\n'
query += args['where'] + '\n'
query += args['group'] + '\n'
query += args['order'] + ';'
logging.info(query)
db = self.get_sql_hook(self.sql_conn_id)
data = db.get_pandas_df(query)
return data
def get_row_for_detail_request(self, dag, name):
# Infer the grain depending if a dag is specified
grain = 'dag'
if dag:
grain = 'task'
args = {'from': 'FROM ' + self.table_name}
args['where'] = "WHERE {grain}_id = '{name}'".format(**locals())
args['select'] = 'SELECT owner'
args['group'] = 'GROUP BY owner'
query = args['select'] + '\n'
query += args['from'] + '\n'
query += args['where'] + '\n'
query += args['group'] + ';'
logging.info(query)
db = self.get_sql_hook(self.sql_conn_id)
data = db.get_pandas_df(query)
# logging.info(data)
return data
# Return summaries of all dags or tasks based on some parameters
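    # Example of a query this builds (illustrative; measure='io', time='week', no dag selected):
    #   SELECT dag_id AS name, (AVG(hdfs_reads)+AVG(hdfs_writes)) AS value
    #   FROM honeypot_logs
    #   WHERE input_date BETWEEN '<start>' AND '<end>'
    #   GROUP BY dag_id
    #   ORDER BY value DESC;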
def get_rows_for_filter(self, measure, time, dag):
args = {'from': 'FROM ' + self.table_name}
# Create the correct date range
end = str(datetime.datetime.utcnow())
start = None
if time == 'year':
            date = (datetime.datetime.utcnow() - relativedelta(years=1))
start = date.strftime("%Y-%m-%d %H:%M:%S")
elif time == 'week':
date = (datetime.datetime.utcnow() - datetime.timedelta(days=7))
start = date.strftime("%Y-%m-%d %H:%M:%S")
elif time == 'month':
date = (datetime.datetime.utcnow() - relativedelta(months=1))
start = date.strftime("%Y-%m-%d %H:%M:%S")
args['where'] = """WHERE input_date BETWEEN '{start}'
AND '{end}'""".format(**locals())
# adjust if user has selected a dag or not
grain = 'dag'
if (dag):
args['where'] += (" AND dag_id = '{dag}'".format(**locals()))
grain = 'task'
if measure == 'io':
args['select'] = """SELECT {grain}_id AS name,
(AVG(hdfs_reads)+AVG(hdfs_writes)) AS value""".format(**locals())
elif measure == 'cpu':
args['select'] = """SELECT {grain}_id AS name,
AVG(cpu_time) AS value""".format(**locals())
elif measure == 'mappers':
args['select'] = """SELECT {grain}_id AS name,
AVG(num_mappers) AS value""".format(**locals())
elif measure == 'reducers':
args['select'] = """SELECT {grain}_id AS name,
AVG(num_reducers) AS value""".format(**locals())
args['order'] = 'ORDER BY value DESC'
args['group'] = 'GROUP BY {grain}_id'.format(**locals())
query = args['select'] + '\n'
query += args['from'] + '\n'
query += args['where'] + '\n'
query += args['group'] + '\n'
query += args['order'] + ';'
logging.info(query)
db = self.get_sql_hook(self.sql_conn_id)
data = db.get_pandas_df(query)
# logging.info(data)
return data
# Create a local test database
# Inputs allow for old table to be dropped, and new data to be generated
def create_table(self, drop=False, with_test_data=False):
"""
Creates the honeypot_log table
"""
db = self.get_sql_hook(self.sql_conn_id)
table = self.table_name
if drop:
sql = "DROP TABLE IF EXISTS {table};".format(**locals())
logging.info("Executing SQL: \n" + sql)
db.run(sql)
sql = """CREATE TABLE IF NOT EXISTS honeypot_logs (
log_filepath VARCHAR(255) ,
dag_id VARCHAR(255),
task_id VARCHAR(255),
job_num INT,
execution_date DATETIME,
duration DOUBLE,
input_date DATETIME,
num_mappers INT,
num_reducers INT,
cpu_time LONG,
hdfs_reads LONG,
hdfs_writes LONG,
owner VARCHAR(255));""".format(**locals())
logging.info("Executing SQL: \n" + sql)
db.run(sql)
if with_test_data:
self.create_test_data()
# A helper function to create a SQL insertion string
def insert_string_from_dict(self, d):
return ('INSERT INTO honeypot_logs VALUES('
'\'{log_filepath}\', '
'\'{dag_id}\', '
'\'{task_id}\', '
'{job_num}, '
'\'{execution_date}\', '
'{task_duration}, '
'\'{input_date}\', '
'{mappers}, '
'{reducers}, '
'{cpu_time}, '
'{hdfs_reads}, '
'{hdfs_writes}, '
'\'{owner}\' '
');').format(**d)
# A helper function to generate a random name
def random_string(self, lowerLength, upperLength):
return ''.join(random.choice(string.ascii_letters)
for _ in range(random.randint(lowerLength, upperLength)))
# A helper function to add random data to local database for testing
def create_test_data(self):
"""
Creates test data
"""
for i in range(10):
print "inserting a test dag"
i = str(i)
batch = 100
db = self.get_sql_hook(self.sql_conn_id)
# gen dates
base = datetime.datetime.today()
dates = [base - datetime.timedelta(days=x) for x in range(0, batch)]
date_strings = [d.strftime("%Y-%m-%dT%H:%M:%SZ") for d in dates]
strings = []
for date_string in date_strings:
num_jobs = random.randint(1, 7)
d = {}
d['log_filepath'] = 'fake_filepath' + i + '_' + date_string
d['dag_id'] = 'fake_dag' + i
d['task_id'] = 'fake_task' + i
d['execution_date'] = date_string
d['task_duration'] = str(random.randint(100, 10000))
d['input_date'] = date_string
d['owner'] = 'fake_owner' + i
for job_num in range(num_jobs):
d['job_num'] = str(job_num)
d['mappers'] = str(random.randint(1, 100))
d['reducers'] = str(random.randint(1, 100))
d['cpu_time'] = str(random.randint(100, 10000))
d['hdfs_reads'] = str(random.randint(100, 100000))
d['hdfs_writes'] = str(random.randint(100, 100000))
strings.append(self.insert_string_from_dict(d))
logging.info(strings)
for insertion_string in strings:
logging.info("Executing SQL: \n" + insertion_string)
try:
db.run(insertion_string)
except Exception as e:
print "Failed to insert row: ", e
# A helper to create the approprate hook depending on local or airflow use
def get_sql_hook(self, sql_conn_id):
if 'sqlite' in sql_conn_id:
from airflow.hooks import SqliteHook
return SqliteHook(sql_conn_id)
else:
from airflow.hooks import MySqlHook
return MySqlHook(sql_conn_id)
| apache-2.0 | 1,176,233,732,038,446,800 | 35.765182 | 80 | 0.49565 | false |
Banbury/cartwheel-3d | Python/GLUtilsTest.py | 1 | 10680 | from App.UtilFuncs import fancify
print fancify(
"""Character(
root = ArticulatedRigidBody(
name = "pelvis",
meshes = [ (path.join(meshDir, "pelvis_2_b.obj"), colourDark),
(path.join(meshDir, "pelvis_2_s.obj"), colourLight) ],
mass = 12.9,
moi = (0.0705, 0.11, 0.13),
cdps = [ SphereCDP((0,-0.075,0), 0.12) ],
pos = (0, 1.035, 0.2),
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
arbs = [
ArticulatedRigidBody(
name = "torso",
meshes = [ (path.join(meshDir, "torso_2_b.obj"), colourDark),
(path.join(meshDir, "torso_2_s_v2.obj"), colourLight) ],
mass = 22.5,
moi = (0.34, 0.21, 0.46),
cdps = [ SphereCDP((0,0,0.01), 0.11) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "head",
meshes = [ (path.join(meshDir, "head_b.obj"), colourDark),
(path.join(meshDir, "head_s.obj"), colourLight) ],
mass = 5.2,
moi = (0.04, 0.02, 0.042),
cdps = [ SphereCDP((0,0.04,0), 0.11) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "lUpperArm",
meshes = [ (path.join(meshDir, "lUpperArm.obj"), colourDark) ],
mass = 2.2,
moi = (0.005, 0.02, 0.02),
cdps = [ CapsuleCDP((-0.15,0,0), (0.15,0,0), 0.05) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "lLowerArm",
meshes = [ (path.join(meshDir, "lLowerArm.obj"), colourDark) ],
mass = 1.7,
moi = (0.0024, 0.025, 0.025),
cdps = [ CapsuleCDP((-0.15,0,0), (0.15,0,0), 0.05) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "rUpperArm",
meshes = [ (path.join(meshDir, "rUpperArm.obj"), colourDark) ],
mass = 2.2,
moi = (0.005, 0.02, 0.02),
cdps = [ CapsuleCDP((-0.15,0,0), (0.15,0,0), 0.05) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "rLowerArm",
meshes = [ (path.join(meshDir, "rLowerArm.obj"), colourDark) ],
mass = 1.7,
moi = (0.0024, 0.025, 0.025),
cdps = [ CapsuleCDP((-0.15,0,0), (0.15,0,0), 0.05) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "lUpperLeg",
meshes = [ (path.join(meshDir, "lUpperLeg.obj"), colourDark) ],
mass = 6.6,
moi = (0.15, 0.022, 0.15),
cdps = [ CapsuleCDP((0, 0.12, 0), (0, -0.26, 0), 0.05) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "lLowerLeg",
meshes = [ (path.join(meshDir, "lLowerLeg.obj"), colourDark) ],
mass = 3.2,
moi = (0.055, 0.007, 0.055),
cdps = [ CapsuleCDP((0, 0.12, 0), (0, -0.2, 0), 0.05) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "rUpperLeg",
meshes = [ (path.join(meshDir, "rUpperLeg.obj"), colourDark) ],
mass = 6.6,
moi = (0.15, 0.022, 0.15),
cdps = [ CapsuleCDP((0, 0.12, 0), (0, -0.26, 0), 0.05) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "rLowerLeg",
meshes = [ (path.join(meshDir, "rLowerLeg.obj"), colourDark) ],
mass = 3.2,
moi = (0.055, 0.007, 0.055),
cdps = [ CapsuleCDP((0, 0.12, 0), (0, -0.2, 0), 0.05) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35 ),
ArticulatedRigidBody(
name = "lFoot",
meshes = [ (path.join(meshDir, "lFoot.obj"), colourDark) ],
mass = 1.0,
moi = (0.007, 0.008, 0.002),
cdps = [ BoxCDP((-0.025, -0.033, -0.09), (0.025, 0.005, 0.055)) ],
# CDP_Sphere 0.025 -0.025 -0.08 0.01
# CDP_Sphere -0.025 -0.025 -0.08 0.01
# CDP_Sphere 0.02 -0.025 0.045 0.01
# CDP_Sphere -0.02 -0.025 0.045 0.01
frictionCoeff = 0.8,
restitutionCoeff = 0.35,
groundCoeffs = (0.0002, 0.2) ),
ArticulatedRigidBody(
name = "rFoot",
meshes = [ (path.join(meshDir, "rFoot.obj"), colourDark) ],
mass = 1.0,
moi = (0.007, 0.008, 0.002),
cdps = [ BoxCDP((-0.025, -0.033, -0.09), (0.025, 0.005, 0.055)) ],
# CDP_Sphere 0.025 -0.025 -0.08 0.01
# CDP_Sphere -0.025 -0.025 -0.08 0.01
# CDP_Sphere 0.02 -0.025 0.045 0.01
# CDP_Sphere -0.02 -0.025 0.045 0.01
frictionCoeff = 0.8,
restitutionCoeff = 0.35,
groundCoeffs = (0.0002, 0.2) ),
ArticulatedRigidBody(
name = "lToes",
meshes = [ (path.join(meshDir, "lToes.obj"), colourDark) ],
mass = 0.2,
moi = (0.002, 0.002, 0.0005),
cdps = [ SphereCDP((0.0, -0.005, 0.025), 0.01) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35,
groundCoeffs = (0.0002, 0.2) ),
ArticulatedRigidBody(
name = "rToes",
meshes = [ (path.join(meshDir, "rToes.obj"), colourDark) ],
mass = 0.2,
moi = (0.002, 0.002, 0.0005),
cdps = [ SphereCDP((0.0, -0.005, 0.025), 0.01) ],
frictionCoeff = 0.8,
restitutionCoeff = 0.35,
groundCoeffs = (0.0002, 0.2) )
],
joints = [
BallInSocketJoint(
name = "pelvis_torso",
parent = "pelvis",
child = "torso",
posInParent = (0, 0.17, -0.035),
posInChild = (0, -0.23, -0.01),
swingAxis1 = (1, 0, 0),
twistAxis = ( 0, 1, 0),
limits = (-0.6, 0.6, -0.6, 0.6, -0.6, 0.6) ),
BallInSocketJoint(
name = "torso_head",
parent = "torso",
child = "head",
posInParent = (0, 0.1, -0.00),
posInChild = (0, -0.16, -0.025),
swingAxis1 = (1, 0, 0),
twistAxis = ( 0, 1, 0),
limits = (-0.6, 0.6, -0.6, 0.6, -0.6, 0.6) ),
BallInSocketJoint(
name = "lShoulder",
parent = "torso",
child = "lUpperArm",
posInParent = (0.20, 0.07, 0.02),
posInChild = (-0.17, 0, 0),
swingAxis1 = (0, 0, 1),
twistAxis = ( 1, 0, 0),
limits = (-1.7, 1.7, -1.5, 1.5, -1.5, 1.5) ),
BallInSocketJoint(
name = "rShoulder",
parent = "torso",
child = "rUpperArm",
posInParent = (-0.20, 0.07, 0.02),
posInChild = (0.17, 0, 0),
swingAxis1 = (0, 0, 1),
twistAxis = ( 1, 0, 0),
limits = (-1.7, 1.7, -1.5, 1.5, -1.5, 1.5) ),
HingeJoint(
name = "lElbow",
parent = "lUpperArm",
child = "lLowerArm",
posInParent = (0.175, 0, 0.006),
posInChild = (-0.215, 0, 0),
axis = ( 0, 1, 0 ),
limits = (-2.7, 0) ),
HingeJoint(
name = "rElbow",
parent = "rUpperArm",
child = "rLowerArm",
posInParent = (-0.175, 0, 0.006),
posInChild = (0.215, 0, 0),
axis = ( 0, -1, 0 ),
limits = (-2.7, 0) ),
BallInSocketJoint(
name = "lHip",
parent = "pelvis",
child = "lUpperLeg",
posInParent = (0.1, -0.05, 0.0),
posInChild = (0, 0.21, 0),
swingAxis1 = (1, 0, 0),
twistAxis = ( 0, 1, 0),
limits = (-1.3, 1.9, -1, 1, -0.25, 1) ),
BallInSocketJoint(
name = "rHip",
parent = "pelvis",
child = "rUpperLeg",
posInParent = (-0.1, -0.05, 0.0),
posInChild = (0, 0.21, 0),
swingAxis1 = (1, 0, 0),
twistAxis = ( 0, 1, 0),
limits = (-1.3, 1.9, -1, 1, -1, 0.25) ),
HingeJoint(
name = "lKnee",
parent = "lUpperLeg",
child = "lLowerLeg",
posInParent = (0, -0.26, 0),
posInChild = (0, 0.21, 0),
axis = ( 1, 0, 0 ),
limits = (0, 2.5) ),
HingeJoint(
name = "rKnee",
parent = "rUpperLeg",
child = "rLowerLeg",
posInParent = (0, -0.26, 0),
posInChild = (0, 0.21, 0),
axis = ( 1, 0, 0 ),
limits = (0, 2.5) ),
UniversalJoint(
name = "lAnkle",
parent = "lLowerLeg",
child = "lFoot",
posInParent = (0, -0.25, 0.01),
posInChild = (0.0, 0.02, -0.04),
parentAxis = (1, 0, 0),
childAxis = (0, 0, 1),
limits = (-0.75, 0.75, -0.75, 0.75) ),
UniversalJoint(
name = "rAnkle",
parent = "rLowerLeg",
child = "rFoot",
posInParent = (0, -0.25, 0.01),
posInChild = (0.0, 0.02, -0.04),
parentAxis = (1, 0, 0),
childAxis = (0, 0, -1),
limits = (-0.75, 0.75, -0.75, 0.75) ),
HingeJoint(
name = "lToeJoint",
parent = "lFoot",
child = "lToes",
posInParent = (0, -0.02, 0.05),
posInChild = (0, 0, -0.025),
axis = ( 1, 0, 0 ),
limits = (-0.52, 0.02) ),
HingeJoint(
name = "rToeJoint",
parent = "rFoot",
child = "rToes",
posInParent = (0, -0.02, 0.05),
posInChild = (0, 0, -0.025),
axis = ( 1, 0, 0 ),
limits = (-0.52, 0.02) )
]
)""")
| apache-2.0 | -380,019,581,576,461,760 | 34.20339 | 79 | 0.408614 | false |
caladrel/trueskill_kicker | league/migrations/0001_initial.py | 1 | 3950 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Match',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('score_team1', models.PositiveSmallIntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], validators=[django.core.validators.MaxValueValidator(10)])),
('score_team2', models.PositiveSmallIntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], validators=[django.core.validators.MaxValueValidator(10)])),
('timestamp', models.DateTimeField(auto_now_add=True, db_index=True)),
],
options={
'verbose_name_plural': 'matches',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, db_index=True)),
('mu', models.FloatField(default=25.0)),
('sigma', models.FloatField(default=8.333333333333334)),
('rank', models.FloatField(default=0.0, db_index=True)),
('attacker_mu', models.FloatField(default=25.0)),
('attacker_sigma', models.FloatField(default=8.333333333333334)),
('attacker_rank', models.FloatField(default=0.0, db_index=True)),
('defender_mu', models.FloatField(default=25.0)),
('defender_sigma', models.FloatField(default=8.333333333333334)),
('defender_rank', models.FloatField(default=0.0, db_index=True)),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PlayerHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mu', models.FloatField(default=25.0)),
('sigma', models.FloatField(default=8.333333333333334)),
('rank', models.FloatField(default=0.0)),
('was_attacker', models.BooleanField(default=False)),
('seperate_mu', models.FloatField(default=25.0)),
('seperate_sigma', models.FloatField(default=8.333333333333334)),
('seperate_rank', models.FloatField(default=0.0)),
('match', models.ForeignKey(to='league.Match')),
('player', models.ForeignKey(to='league.Player')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='match',
name='team1_player1',
field=models.ForeignKey(related_name='+', to='league.Player'),
preserve_default=True,
),
migrations.AddField(
model_name='match',
name='team1_player2',
field=models.ForeignKey(related_name='+', to='league.Player'),
preserve_default=True,
),
migrations.AddField(
model_name='match',
name='team2_player1',
field=models.ForeignKey(related_name='+', to='league.Player'),
preserve_default=True,
),
migrations.AddField(
model_name='match',
name='team2_player2',
field=models.ForeignKey(related_name='+', to='league.Player'),
preserve_default=True,
),
]
| apache-2.0 | 6,388,180,689,287,207,000 | 43.382022 | 225 | 0.533418 | false |
zcbenz/cefode-chromium | tools/telemetry/telemetry/core/chrome/browser_backend.py | 1 | 7145 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import urllib2
import httplib
import socket
import json
import re
import sys
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.core import user_agent
from telemetry.core import wpr_modes
from telemetry.core import wpr_server
from telemetry.core.chrome import extension_dict_backend
from telemetry.core.chrome import tab_list_backend
from telemetry.core.chrome import tracing_backend
from telemetry.test import options_for_unittests
class ExtensionsNotSupportedException(Exception):
pass
class BrowserBackend(object):
"""A base class for browser backends. Provides basic functionality
once a remote-debugger port has been established."""
WEBPAGEREPLAY_HOST = '127.0.0.1'
def __init__(self, is_content_shell, supports_extensions, options):
self.browser_type = options.browser_type
self.is_content_shell = is_content_shell
self._supports_extensions = supports_extensions
self.options = options
self._browser = None
self._port = None
self._inspector_protocol_version = 0
self._chrome_branch_number = 0
self._webkit_base_revision = 0
self._tracing_backend = None
self.webpagereplay_local_http_port = util.GetAvailableLocalPort()
self.webpagereplay_local_https_port = util.GetAvailableLocalPort()
self.webpagereplay_remote_http_port = self.webpagereplay_local_http_port
self.webpagereplay_remote_https_port = self.webpagereplay_local_https_port
if options.dont_override_profile and not options_for_unittests.AreSet():
sys.stderr.write('Warning: Not overriding profile. This can cause '
'unexpected effects due to profile-specific settings, '
'such as about:flags settings, cookies, and '
'extensions.\n')
self._tab_list_backend = tab_list_backend.TabListBackend(self)
self._extension_dict_backend = None
if supports_extensions:
self._extension_dict_backend = \
extension_dict_backend.ExtensionDictBackend(self)
def SetBrowser(self, browser):
self._browser = browser
self._tab_list_backend.Init()
@property
def browser(self):
return self._browser
@property
def supports_extensions(self):
"""True if this browser backend supports extensions."""
return self._supports_extensions
@property
def tab_list_backend(self):
return self._tab_list_backend
@property
def extension_dict_backend(self):
return self._extension_dict_backend
def GetBrowserStartupArgs(self):
args = []
args.extend(self.options.extra_browser_args)
args.append('--disable-background-networking')
args.append('--metrics-recording-only')
args.append('--no-first-run')
if self.options.wpr_mode != wpr_modes.WPR_OFF:
args.extend(wpr_server.GetChromeFlags(
self.WEBPAGEREPLAY_HOST,
self.webpagereplay_remote_http_port,
self.webpagereplay_remote_https_port))
args.extend(user_agent.GetChromeUserAgentArgumentFromType(
self.options.browser_user_agent_type))
extensions = [extension.path for extension in
self.options.extensions_to_load if not extension.is_component]
extension_str = ','.join(extensions)
if len(extensions) > 0:
args.append('--load-extension=%s' % extension_str)
component_extensions = [extension.path for extension in
self.options.extensions_to_load if extension.is_component]
component_extension_str = ','.join(component_extensions)
if len(component_extensions) > 0:
args.append('--load-component-extension=%s' % component_extension_str)
return args
@property
def wpr_mode(self):
return self.options.wpr_mode
def _WaitForBrowserToComeUp(self, timeout=None):
def IsBrowserUp():
try:
self.Request('', timeout=timeout)
except (socket.error, httplib.BadStatusLine, urllib2.URLError):
return False
else:
return True
try:
util.WaitFor(IsBrowserUp, timeout=30)
except util.TimeoutException:
raise exceptions.BrowserGoneException()
def AllExtensionsLoaded():
for e in self.options.extensions_to_load:
if not e.extension_id in self._extension_dict_backend:
return False
extension_object = self._extension_dict_backend[e.extension_id]
extension_object.WaitForDocumentReadyStateToBeInteractiveOrBetter()
return True
if self._supports_extensions:
util.WaitFor(AllExtensionsLoaded, timeout=30)
def _PostBrowserStartupInitialization(self):
# Detect version information.
data = self.Request('version')
resp = json.loads(data)
if 'Protocol-Version' in resp:
self._inspector_protocol_version = resp['Protocol-Version']
if 'Browser' in resp:
branch_number_match = re.search('Chrome/\d+\.\d+\.(\d+)\.\d+',
resp['Browser'])
else:
branch_number_match = re.search(
'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
resp['User-Agent'])
webkit_version_match = re.search('\((trunk)?\@(\d+)\)',
resp['WebKit-Version'])
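    # Illustrative values: a version string such as 'Chrome/18.0.1025.168' yields
    # branch number 1025, and a WebKit-Version such as '534.30 (@106313)' yields
    # webkit base revision 106313 (the same values used by the fallback below).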
if branch_number_match:
self._chrome_branch_number = int(branch_number_match.group(1))
else:
# Content Shell returns '' for Browser, for now we have to
# fall-back and assume branch 1025.
self._chrome_branch_number = 1025
if webkit_version_match:
self._webkit_base_revision = int(webkit_version_match.group(2))
return
# Detection has failed: assume 18.0.1025.168 ~= Chrome Android.
self._inspector_protocol_version = 1.0
self._chrome_branch_number = 1025
self._webkit_base_revision = 106313
def Request(self, path, timeout=None):
url = 'http://localhost:%i/json' % self._port
if path:
url += '/' + path
req = urllib2.urlopen(url, timeout=timeout)
return req.read()
@property
def chrome_branch_number(self):
return self._chrome_branch_number
@property
def supports_tab_control(self):
return self._chrome_branch_number >= 1303
@property
def supports_tracing(self):
return self.is_content_shell or self._chrome_branch_number >= 1385
def StartTracing(self):
if self._tracing_backend is None:
self._tracing_backend = tracing_backend.TracingBackend(self._port)
self._tracing_backend.BeginTracing()
def StopTracing(self):
self._tracing_backend.EndTracing()
def GetTraceResultAndReset(self):
return self._tracing_backend.GetTraceResultAndReset()
def GetRemotePort(self, _):
return util.GetAvailableLocalPort()
def Close(self):
if self._tracing_backend:
self._tracing_backend.Close()
self._tracing_backend = None
def CreateForwarder(self, *port_pairs):
raise NotImplementedError()
def IsBrowserRunning(self):
raise NotImplementedError()
def GetStandardOutput(self):
raise NotImplementedError()
| bsd-3-clause | -2,395,234,556,662,840,000 | 32.70283 | 80 | 0.684955 | false |
Cadasta/cadasta-platform | cadasta/xforms/tests/test_utils.py | 1 | 3134 | import pytest
from django.test import TestCase
from ..utils import InvalidODKGeometryError, odk_geom_to_wkt
class TestODKGeomToWKT(TestCase):
def setUp(self):
self.geoshape = ('45.56342779158167 -122.67650283873081 0.0 0.0;'
'45.56176327330353 -122.67669159919024 0.0 0.0;'
'45.56151562182025 -122.67490658909082 0.0 0.0;'
'45.563479432877415 -122.67494414001703 0.0 0.0;'
'45.56176327330353 -122.67669159919024 0.0 0.0')
self.line = ('45.56342779158167 -122.67650283873081 0.0 0.0;'
'45.56176327330353 -122.67669159919024 0.0 0.0;'
'45.56151562182025 -122.67490658909082 0.0 0.0;')
self.simple_line = (
'45.56342779158167 -122.67650283873081 0.0 0.0;'
'45.56176327330353 -122.67669159919024 0.0 0.0;'
)
self.geotrace_as_poly = (
'52.9414478 -8.034659 0.0 0.0;'
'52.94134675 -8.0354197 0.0 0.0;'
'52.94129841 -8.03517551 0.0 0.0;'
'52.94142406 -8.03487897 0.0 0.0;'
'52.9414478 -8.034659 0.0 0.0;'
)
self.point = '45.56342779158167 -122.67650283873081 0.0 0.0;'
def test_geoshape(self):
poly = (
'POLYGON ((-122.6765028387308121 45.5634277915816668, '
'-122.6766915991902351 45.5617632733035265, -122.6749065890908241 '
'45.5615156218202486, -122.6749441400170326 45.5634794328774149, '
'-122.6765028387308121 45.5634277915816668))'
)
geom = odk_geom_to_wkt(self.geoshape)
assert geom == poly
def test_geotrace(self):
line = (
'LINESTRING (-122.6765028387308121 45.5634277915816668, '
'-122.6766915991902351 45.5617632733035265, -122.6749065890908241 '
'45.5615156218202486)'
)
geom = odk_geom_to_wkt(self.line)
assert geom == line
def test_geopoint(self):
point = 'POINT (-122.6765028387308121 45.5634277915816668)'
geom = odk_geom_to_wkt(self.point)
assert geom == point
def test_line_two_points(self):
line = (
'LINESTRING (-122.6765028387308121 45.5634277915816668, '
'-122.6766915991902351 45.5617632733035265)'
)
geom = odk_geom_to_wkt(self.simple_line)
assert geom == line
def test_geotrace_as_poly(self):
poly = (
'POLYGON ((-8.0346589999999996 52.9414477999999988, '
'-8.0354197000000003 52.9413467500000010, -8.0351755100000002 '
'52.9412984100000017, -8.0348789699999994 52.9414240600000028, '
'-8.0346589999999996 52.9414477999999988))'
)
geom = odk_geom_to_wkt(self.geotrace_as_poly)
assert geom == poly
def test_bad_geom(self):
bad_geom = 'this is not a geometry'
with pytest.raises(InvalidODKGeometryError) as e:
odk_geom_to_wkt(bad_geom)
assert str(e.value) == (
"Invalid ODK Geometry: could not convert string to float: 'is'"
)
| agpl-3.0 | -5,008,687,416,031,206,000 | 36.309524 | 79 | 0.585514 | false |
google/struct2tensor | struct2tensor/expression_impl/slice_expression_test.py | 1 | 7170 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for slice_expression."""
from absl.testing import absltest
from struct2tensor import calculate
from struct2tensor import create_expression
from struct2tensor import path
# For tf.Session.Run against a Prensor
from struct2tensor import prensor_value # pylint: disable=unused-import
from struct2tensor.expression_impl import slice_expression
from struct2tensor.test import prensor_test_util
import tensorflow as tf
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class SliceExpressionTest(tf.test.TestCase):
def test_slice_end(self):
root = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
root_2 = slice_expression.slice_expression(root, path.Path(["doc"]),
"new_doc", None, 1)
result = calculate.calculate_prensors([root_2])[0]
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc"
])).node.parent_index, [0, 1])
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc", "keep_me"
])).node.parent_index, [0, 1])
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc",
"keep_me"])).node.values,
[False, True])
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc",
"bar"])).node.parent_index,
[0, 1, 1])
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc",
"bar"])).node.values,
[b"a", b"b", b"c"])
def test_slice_begin(self):
"""Test slice with only begin specified.
Starts with:
{
foo:9,
foorepeated:[9],
doc:[{
bar:["a"],
keep_me:False
}],
user:[
{
friends:["a"]
}]
}
{foo:8,
foorepeated:[8,7],
doc:[{
bar:["b","c"],
keep_me:True
},{
bar:["d"]
}],
user:[{
friends:["b", "c"]
},{
friends:["d"]
}],
}
{foo:7,
foorepeated:[6],
user:[{friends:["e"]}]}
Creates new_doc by slicing doc[1:]:
{foo:9,
foorepeated:[9],
doc:[{
bar:["a"],
keep_me:False
}],
user:[{
friends:["a"]
}]}
{foo:8,
foorepeated:[8,7],
doc:[{
bar:["b","c"],
keep_me:True
},{
bar:["d"]
}],
new_doc[{
bar:["d"]
}],
user:[{
friends:["b", "c"]
},{
friends:["d"]}],}
{foo:7,
foorepeated:[6],
user:[{
friends:["e"]
}]}
"""
root = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
root_2 = slice_expression.slice_expression(root, path.Path(["doc"]),
"new_doc", 1, None)
result = calculate.calculate_prensors([root_2])[0]
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc"
])).node.parent_index, [1])
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc", "keep_me"
])).node.parent_index, [])
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc",
"keep_me"])).node.values, [])
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc",
"bar"])).node.parent_index,
[0])
self.assertAllEqual(
result.get_descendant_or_error(path.Path(["new_doc",
"bar"])).node.values, [b"d"])
def test_slice_mask(self):
root = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
root_2, new_path = slice_expression._get_slice_mask(root,
path.Path(["doc"]),
None, 1)
result = calculate.calculate_prensors([root_2])[0]
self.assertAllEqual(
result.get_descendant_or_error(new_path).node.parent_index, [0, 1, 1])
self.assertAllEqual(
result.get_descendant_or_error(new_path).node.values,
[True, True, False])
def test_slice_mask_end_negative(self):
root = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
root_2, new_path = slice_expression._get_slice_mask(root,
path.Path(["doc"]),
None, -1)
result = calculate.calculate_prensors([root_2])[0]
self.assertAllEqual(
result.get_descendant_or_error(new_path).node.parent_index, [0, 1, 1])
self.assertAllEqual(
result.get_descendant_or_error(new_path).node.values,
[False, True, False])
def test_slice_mask_begin_positive(self):
root = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
root_2, new_path = slice_expression._get_slice_mask(root,
path.Path(["doc"]), 1,
None)
[result] = calculate.calculate_prensors([root_2])
self.assertAllEqual(
result.get_descendant_or_error(new_path).node.parent_index, [0, 1, 1])
self.assertAllEqual(
result.get_descendant_or_error(new_path).node.values,
[False, False, True])
def test_slice_mask_begin_negative(self):
root = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
root_2, new_path = slice_expression._get_slice_mask(root,
path.Path(["doc"]), -1,
None)
result = calculate.calculate_prensors([root_2])[0]
self.assertAllEqual(
result.get_descendant_or_error(new_path).node.parent_index, [0, 1, 1])
self.assertAllEqual(
result.get_descendant_or_error(new_path).node.values,
[True, False, True])
if __name__ == "__main__":
absltest.main()
| apache-2.0 | 1,086,534,423,020,961,500 | 35.030151 | 95 | 0.539191 | false |
KazDragon/munin | conanfile.py | 1 | 2093 | from conans import ConanFile, CMake, tools
class MuninConan(ConanFile):
name = "munin"
license = "MIT"
author = "KazDragon"
url = "https://github.com/KazDragon/munin"
description = "A text-based gui component library build on Terminal++"
topics = ("ansi-escape-codes", "text-ui")
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "coverage": [True, False], "sanitize" : ["off", "address"]}
default_options = {"shared": False, "coverage": False, "sanitize": "off"}
exports = "*.hpp", "*.in", "*.cpp", "CMakeLists.txt", "*.md", "LICENSE"
requires = ("terminalpp/[>=2.0.1]@kazdragon/conan-public",
"nlohmann_json/[>=3.3.0]",
"boost/[>=1.69]")
build_requires = ("gtest/[>=1.8.1]")
generators = "cmake"
def imports(self):
# If Munin is built as shared, then running the tests will
# rely on the shared object for terminalpp being available
# in the same directory.
self.copy("*.so*", dst="", src="", keep_path=False, root_package="terminalpp")
def configure(self):
self.options["terminalpp"].shared = self.options.shared
def build(self):
cmake = CMake(self)
cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared
cmake.definitions["MUNIN_COVERAGE"] = self.options.coverage
cmake.definitions["MUNIN_SANITIZE"] = self.options.sanitize
cmake.configure()
cmake.build()
def package(self):
self.copy("*.hpp", dst="include", src="include")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.so.*", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
if self.settings.build_type == "Debug":
self.cpp_info.libs = ["munind"]
else:
self.cpp_info.libs = ["munin"]
| mit | 1,457,283,711,952,135,400 | 39.25 | 99 | 0.584329 | false |
Zanzibar82/streamondemand.test | channels/cinestreaming01.py | 1 | 4463 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Channel for cinestreaming01.com
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand.
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "cinestreaming01"
__category__ = "F"
__type__ = "generic"
__title__ = "Cinestreaming01"
__language__ = "IT"
DEBUG = config.get_setting("debug")
sito="http://www.cinestreaming01.com"
def isGeneric():
return True
def mainlist(item):
logger.info("streamondemand.cinestreaming01 mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, title="[COLOR azure]Ultimi Film Inseriti[/COLOR]", action="peliculas", url=sito, thumbnail="http://dc584.4shared.com/img/XImgcB94/s7/13feaf0b538/saquinho_de_pipoca_01"))
itemlist.append( Item(channel=__channel__, title="[COLOR azure]Film Per Categoria[/COLOR]", action="categorias", url=sito, thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"))
itemlist.append( Item(channel=__channel__, title="[COLOR yellow]Cerca...[/COLOR]", action="search", thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))
return itemlist
def categorias(item):
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
bloque = scrapertools.get_match(data,'<ul class="main-menu clearfix">(.*?)</ul>')
    # Extract the entries (folders)
patron = '<li><a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"]")
itemlist.append( Item(channel=__channel__, action="peliculas", title="[COLOR azure]"+scrapedtitle+"[/COLOR]" , url=sito+scrapedurl , thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png", folder=True) )
return itemlist
def search(item,texto):
logger.info("[cinestreaming01.py] "+item.url+" search "+texto)
item.url = "http://cinestreaming01.com/?s="+texto
try:
return peliculas(item)
    # Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def peliculas(item):
logger.info("streamondemand.cinestreaming01 peliculas")
itemlist = []
    # Download the page
    data = scrapertools.cache_page(item.url)
    # Extract the entries (folders)
#patron = '<div class="boxim">\s*'
    patron = '<div class="box " id="post-.*?">.*?<a href="(.*?)"><img class="boximg" src="http://cinestreaming01.com/wp-content/themes/Boxoffice/timthumb.php\?src=(.*?)&h=270&w=180&zc=1" alt=""/></a>\s*'
patron += '<h2><a href=".*?" rel="bookmark" title=".*?">(.*?)</a></h2>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        #scrapedtitle=scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming e download ita ",""))
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=__channel__, action="findvideos", title="[COLOR azure]"+scrapedtitle+"[/COLOR]" , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    # Extract the pagination link
patronvideos = '<span class="pnext"><a href="(.*?)">Avanti</a></span>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches)>0:
scrapedurl = urlparse.urljoin(item.url,matches[0])
itemlist.append( Item(channel=__channel__, action="peliculas", title="[COLOR orange]Successivo>>[/COLOR]" , url=scrapedurl , thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png", folder=True) )
return itemlist
| gpl-3.0 | -8,911,400,560,965,957,000 | 43.62 | 299 | 0.6645 | false |
serge-sans-paille/pythran | pythran/tests/cases/stone.py | 1 | 5387 | #pythran export whetstone(int)
#runas whetstone(2*10**2)
#bench whetstone(1500)
"""
/*
* C Converted Whetstone Double Precision Benchmark
* Version 1.2 22 March 1998
*
* (c) Copyright 1998 Painter Engineering, Inc.
* All Rights Reserved.
*
* Permission is granted to use, duplicate, and
* publish this text and program as long as it
* includes this entire comment block and limited
* rights reference.
*
* Converted by Rich Painter, Painter Engineering, Inc. based on the
* www.netlib.org benchmark/whetstoned version obtained 16 March 1998.
*
* A novel approach was used here to keep the look and feel of the
* FORTRAN version. Altering the FORTRAN-based array indices,
* starting at element 1, to start at element 0 for C, would require
* numerous changes, including decrementing the variable indices by 1.
* Instead, the array E1[] was declared 1 element larger in C. This
* allows the FORTRAN index range to function without any literal or
* variable indices changes. The array element E1[0] is simply never
* used and does not alter the benchmark results.
*
* The major FORTRAN comment blocks were retained to minimize
* differences between versions. Modules N5 and N12, like in the
* FORTRAN version, have been eliminated here.
*
* An optional command-line argument has been provided [-c] to
* offer continuous repetition of the entire benchmark.
* An optional argument for setting an alternate LOOP count is also
* provided. Define PRINTOUT to cause the POUT() function to print
* outputs at various stages. Final timing measurements should be
* made with the PRINTOUT undefined.
*
* Questions and comments may be directed to the author at
* [email protected]
*/
"""
from math import sin as DSIN, cos as DCOS, atan as DATAN, log as DLOG, exp as DEXP, sqrt as DSQRT
def whetstone(loopstart):
# The actual benchmark starts here.
T = .499975;
T1 = 0.50025;
T2 = 2.0;
# With loopcount LOOP=10, one million Whetstone instructions
# will be executed in EACH MAJOR LOOP..A MAJOR LOOP IS EXECUTED
# 'II' TIMES TO INCREASE WALL-CLOCK TIMING ACCURACY.
LOOP = loopstart;
II = 1;
JJ = 1;
while JJ <= II:
N1 = 0;
N2 = 12 * LOOP;
N3 = 14 * LOOP;
N4 = 345 * LOOP;
N6 = 210 * LOOP;
N7 = 32 * LOOP;
N8 = 899 * LOOP;
N9 = 616 * LOOP;
N10 = 0;
N11 = 93 * LOOP;
# Module 1: Simple identifiers
X1 = 1.0;
X2 = -1.0;
X3 = -1.0;
X4 = -1.0;
for I in range(1,N1+1):
X1 = (X1 + X2 + X3 - X4) * T;
X2 = (X1 + X2 - X3 + X4) * T;
X3 = (X1 - X2 + X3 + X4) * T;
X4 = (-X1+ X2 + X3 + X4) * T;
# Module 2: Array elements
E1 = [ 1.0, -1.0, -1.0, -1.0 ]
for I in range(1,N2+1):
E1[0] = ( E1[0] + E1[1] + E1[2] - E1[3]) * T;
E1[1] = ( E1[0] + E1[1] - E1[2] + E1[3]) * T;
E1[2] = ( E1[0] - E1[1] + E1[2] + E1[3]) * T;
E1[3] = (-E1[0] + E1[1] + E1[2] + E1[3]) * T;
# Module 3: Array as parameter
for I in range(1,N3+1):
PA(E1, T, T2);
# Module 4: Conditional jumps
J = 1;
for I in range(1,N4+1):
if J == 1:
J = 2;
else:
J = 3;
if J > 2:
J = 0;
else:
J = 1;
if J < 1:
J = 1;
else:
J = 0;
# Module 5: Omitted
# Module 6: Integer arithmetic
J = 1;
K = 2;
L = 3;
for I in range(1,N6+1):
J = J * (K-J) * (L-K);
K = L * K - (L-J) * K;
L = (L-K) * (K+J);
E1[L-2] = J + K + L;
E1[K-2] = J * K * L;
# Module 7: Trigonometric functions
X = 0.5;
Y = 0.5;
for I in range(1,N7+1):
X = T * DATAN(T2*DSIN(X)*DCOS(X)/(DCOS(X+Y)+DCOS(X-Y)-1.0));
Y = T * DATAN(T2*DSIN(Y)*DCOS(Y)/(DCOS(X+Y)+DCOS(X-Y)-1.0));
# Module 8: Procedure calls
X = 1.0;
Y = 1.0;
Z = 1.0;
for I in range(1,N8+1):
Z=P3(X,Y,T, T2)
# Module 9: Array references
J = 1;
K = 2;
L = 3;
E1[0] = 1.0;
E1[1] = 2.0;
E1[2] = 3.0;
for I in range(1,N9+1):
P0(E1, J, K, L)
# Module 10: Integer arithmetic
J = 2;
K = 3;
for I in range(1,N10+1):
J = J + K;
K = J + K;
J = K - J;
K = K - J - J;
# Module 11: Standard functions
X = 0.75;
for I in range(1,N11+1):
X = DSQRT(DEXP(DLOG(X)/T1));
JJ+=1
KIP = (100.0*LOOP*II)
return KIP
def PA(E, T, T2):
J = 0;
while J<6:
E[0] = ( E[0] + E[1] + E[2] - E[3]) * T;
E[1] = ( E[0] + E[1] - E[2] + E[3]) * T;
E[2] = ( E[0] - E[1] + E[2] + E[3]) * T;
E[3] = (-E[0] + E[1] + E[2] + E[3]) / T2;
J += 1;
def P0(E1, J, K, L):
E1[J-1] = E1[K-1];
E1[K-1] = E1[L-1];
E1[L-1] = E1[J-1];
def P3(X, Y, T, T2):
X1 = X;
Y1 = Y;
X1 = T * (X1 + Y1);
Y1 = T * (X1 + Y1);
return (X1 + Y1) / T2;
| bsd-3-clause | 8,200,889,701,506,693,000 | 24.899038 | 97 | 0.484685 | false |
randombit/botan | src/scripts/ci_check_install.py | 1 | 2918 | #!/usr/bin/env python
# coding=utf8
"""
Botan CI check installation script
This script is used to validate the results of `make install`
(C) 2020 Jack Lloyd, René Meusel, Hannes Rantzsch
Botan is released under the Simplified BSD License (see license.txt)
"""
import os
import sys
import json
import re
def verify_library(build_config):
lib_dir = build_config['libdir']
if not os.path.isdir(lib_dir):
print('Error: libdir "%s" is not a directory' % lib_dir)
return False
found_libs = set([])
major_version = int(build_config["version_major"])
if build_config['compiler'] == 'msvc':
expected_lib_format = r'^botan\.(dll|lib)$'
elif build_config['os'] == 'macos':
expected_lib_format = r'^libbotan-%d\.(a|dylib)$' % (major_version)
else:
expected_lib_format = r'^libbotan-%d\.(a|so)$' % (major_version)
lib_re = re.compile(expected_lib_format)
# Unlike the include dir this may have other random libs in it
for (_, _, filenames) in os.walk(lib_dir):
for filename in filenames:
if lib_re.match(filename) is not None:
found_libs.add(filename)
if len(found_libs) == 0:
print("Could not find any libraries from us")
return False
# This should match up the count and names of libraries installed
# vs the build configuration (eg static lib installed or not)
return True
def verify_includes(build_config):
include_dir = build_config['installed_include_dir']
if not os.path.isdir(include_dir):
print('Error: installed_include_dir "%s" is not a directory' % include_dir)
return False
expected_headers = set(build_config['public_headers'] + build_config['external_headers'])
found_headers = set([])
for (_, _, filenames) in os.walk(include_dir):
for filename in filenames:
found_headers.add(filename)
if found_headers != expected_headers:
missing = expected_headers - found_headers
extra = found_headers - expected_headers
if len(missing) > 0:
print("Missing expected headers: %s" % (" ".join(sorted(missing))))
if len(extra) > 0:
print("Have unexpected headers: %s" % (" ".join(sorted(extra))))
return False
return True
def main(args=None):
if args is None:
args = sys.argv
if len(args) < 2:
print("Usage: %s <build_config.json>" % args[0])
return 1
with open(os.path.join(args[1])) as f:
build_config = json.load(f)
install_prefix = build_config['prefix']
if not os.path.isdir(install_prefix):
print('Error: install_prefix "%s" is not a directory' % install_prefix)
return 1
if not verify_includes(build_config):
return 1
if not verify_library(build_config):
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-2-clause | -1,211,456,448,052,397,800 | 27.048077 | 93 | 0.622557 | false |
Socialsquare/RunningCause | challenges/tasks.py | 1 | 1414 | # coding: utf8
from __future__ import absolute_import
import datetime
from celery import shared_task
from celery.utils.log import get_task_logger
from django.utils.translation import ugettext as _
from django.core.mail import send_mail
from django.contrib.auth import get_user_model
from django.conf import settings
from django.template import loader, Context
from common.helpers import send_email
from .models import Challenge
log = get_task_logger(__name__)
def send_challenge_reminder(user_id):
user = get_user_model().objects.get(id=user_id)
today = datetime.date.today()
filters = {
'status': Challenge.ACTIVE,
'end_date': today
}
ending_challenges = user.challenges_recieved.filter(**filters)
email_subject = _('Challenge ends today!')
email_context = {
'ending_challenges': ending_challenges
}
send_email([user.email],
email_subject,
'challenges/emails/challenges_reminder.html',
email_context)
@shared_task(ignore_result=True)
def send_challenge_reminders():
    # Fetch runners that have challenges ending today.
today = datetime.date.today()
filters = {
'is_active': True,
'challenges_recieved__end_date': today
}
relevant_runners = get_user_model().objects.filter(**filters)
for runner in relevant_runners:
send_challenge_reminder(runner.id)
| mit | -5,044,719,315,472,939,000 | 25.679245 | 66 | 0.683876 | false |
JTarball/tetherbox | docker/app/app/backend/apps/services/migrations/0001_initial.py | 1 | 3463 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('description', models.CharField(max_length=255)),
('action_id', models.IntegerField()),
],
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('description', models.CharField(max_length=255)),
('status', models.SmallIntegerField(default=0, choices=[(0, b'Disabled'), (1, b'Coming Soon'), (2, b'Beta'), (3, b'Enabled')])),
],
),
migrations.CreateModel(
name='Tether',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('enabled', models.BooleanField(default=True, help_text='Designates whether the a web trigger-action is enabled.')),
('actions', models.ManyToManyField(to='services.Action')),
('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
],
),
migrations.CreateModel(
name='Trigger',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('description', models.CharField(max_length=255)),
('trigger_id', models.IntegerField()),
('service', models.ForeignKey(to='services.Service')),
],
),
migrations.CreateModel(
name='UserService',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('token', models.CharField(max_length=255)),
('service', models.ForeignKey(related_name='+', to='services.Service', to_field=b'name')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='tether',
name='trigger',
field=models.ForeignKey(to='services.Trigger'),
),
migrations.AddField(
model_name='tether',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='action',
name='service',
field=models.ForeignKey(to='services.Service'),
),
]
| isc | -504,606,766,729,863,940 | 42.2875 | 182 | 0.556743 | false |
david-martin/atomic-reactor | tests/test_inner.py | 1 | 20378 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import json
import os
from atomic_reactor.build import InsideBuilder
from atomic_reactor.util import ImageName
from atomic_reactor.plugin import (PreBuildPlugin, PrePublishPlugin, PostBuildPlugin, ExitPlugin,
AutoRebuildCanceledException)
from atomic_reactor.plugin import PluginFailedException
import atomic_reactor.plugin
import logging
from flexmock import flexmock
import pytest
from tests.constants import MOCK_SOURCE, SOURCE
from tests.docker_mock import mock_docker
import inspect
from atomic_reactor.inner import BuildResults, BuildResultsEncoder, BuildResultsJSONDecoder
from atomic_reactor.inner import DockerBuildWorkflow
BUILD_RESULTS_ATTRS = ['build_logs',
'built_img_inspect',
'built_img_info',
'base_img_info',
'base_plugins_output',
'built_img_plugins_output']
def test_build_results_encoder():
results = BuildResults()
expected_data = {}
for attr in BUILD_RESULTS_ATTRS:
setattr(results, attr, attr)
expected_data[attr] = attr
data = json.loads(json.dumps(results, cls=BuildResultsEncoder))
assert data == expected_data
def test_build_results_decoder():
data = {}
expected_results = BuildResults()
for attr in BUILD_RESULTS_ATTRS:
setattr(expected_results, attr, attr)
data[attr] = attr
results = json.loads(json.dumps(data), cls=BuildResultsJSONDecoder)
for attr in set(BUILD_RESULTS_ATTRS) - set(['build_logs']):
assert getattr(results, attr) == getattr(expected_results, attr)
class MockDockerTasker(object):
def inspect_image(self, name):
return {}
class X(object):
pass
class MockInsideBuilder(object):
def __init__(self, failed=False):
self.tasker = MockDockerTasker()
self.base_image = ImageName(repo='Fedora', tag='22')
self.image_id = 'asd'
self.failed = failed
@property
def source(self):
result = X()
setattr(result, 'dockerfile_path', '/')
setattr(result, 'path', '/tmp')
return result
def pull_base_image(self, source_registry, insecure=False):
pass
def build(self):
result = X()
setattr(result, 'logs', None)
setattr(result, 'is_failed', lambda: self.failed)
return result
def inspect_built_image(self):
return None
class RaisesMixIn(object):
"""
Mix-in class for plugins that should raise exceptions.
"""
is_allowed_to_fail = False
def __init__(self, tasker, workflow, *args, **kwargs):
super(RaisesMixIn, self).__init__(tasker, workflow,
*args, **kwargs)
def run(self):
raise RuntimeError
class PreRaises(RaisesMixIn, PreBuildPlugin):
"""
This plugin must run and cause the build to abort.
"""
key = 'pre_raises'
class PostRaises(RaisesMixIn, PostBuildPlugin):
"""
This plugin must run and cause the build to abort.
"""
key = 'post_raises'
class PrePubRaises(RaisesMixIn, PrePublishPlugin):
"""
This plugin must run and cause the build to abort.
"""
key = 'prepub_raises'
class WatchedMixIn(object):
"""
Mix-in class for plugins we want to watch.
"""
def __init__(self, tasker, workflow, watcher, *args, **kwargs):
super(WatchedMixIn, self).__init__(tasker, workflow,
*args, **kwargs)
self.watcher = watcher
def run(self):
self.watcher.call()
class PreWatched(WatchedMixIn, PreBuildPlugin):
"""
A PreBuild plugin we can watch.
"""
key = 'pre_watched'
class PrePubWatched(WatchedMixIn, PrePublishPlugin):
"""
A PrePublish plugin we can watch.
"""
key = 'prepub_watched'
class PostWatched(WatchedMixIn, PostBuildPlugin):
"""
A PostBuild plugin we can watch.
"""
key = 'post_watched'
class ExitWatched(WatchedMixIn, ExitPlugin):
"""
An Exit plugin we can watch.
"""
key = 'exit_watched'
class ExitRaises(RaisesMixIn, ExitPlugin):
"""
An Exit plugin that should raise an exception.
"""
key = 'exit_raises'
class ExitCompat(WatchedMixIn, ExitPlugin):
"""
An Exit plugin called as a Post-build plugin.
"""
key = 'store_logs_to_file'
class Watcher(object):
def __init__(self):
self.called = False
def call(self):
self.called = True
def was_called(self):
return self.called
def test_workflow():
"""
Test normal workflow.
"""
this_file = inspect.getfile(PreWatched)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_pre = Watcher()
watch_prepub = Watcher()
watch_post = Watcher()
watch_exit = Watcher()
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
prebuild_plugins=[{'name': 'pre_watched',
'args': {
'watcher': watch_pre
}}],
prepublish_plugins=[{'name': 'prepub_watched',
'args': {
'watcher': watch_prepub,
}}],
postbuild_plugins=[{'name': 'post_watched',
'args': {
'watcher': watch_post
}}],
exit_plugins=[{'name': 'exit_watched',
'args': {
'watcher': watch_exit
}}],
plugin_files=[this_file])
workflow.build_docker_image()
assert watch_pre.was_called()
assert watch_prepub.was_called()
assert watch_post.was_called()
assert watch_exit.was_called()
class FakeLogger(object):
def __init__(self):
self.debugs = []
self.infos = []
self.warnings = []
self.errors = []
def log(self, logs, args):
logs.append(args)
def debug(self, *args):
self.log(self.debugs, args)
def info(self, *args):
self.log(self.infos, args)
def warning(self, *args):
self.log(self.warnings, args)
def error(self, *args):
self.log(self.errors, args)
def test_workflow_compat():
"""
Some of our plugins have changed from being run post-build to
being run at exit. Let's test what happens when we try running an
exit plugin as a post-build plugin.
"""
this_file = inspect.getfile(PreWatched)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_exit = Watcher()
fake_logger = FakeLogger()
atomic_reactor.plugin.logger = fake_logger
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
postbuild_plugins=[{'name': 'store_logs_to_file',
'args': {
'watcher': watch_exit
}}],
plugin_files=[this_file])
workflow.build_docker_image()
assert watch_exit.was_called()
assert len(fake_logger.errors) > 0
class Pre(PreBuildPlugin):
"""
This plugin does nothing. It's only used for configuration testing.
"""
key = 'pre'
class Post(PostBuildPlugin):
"""
This plugin does nothing. It's only used for configuration testing.
"""
key = 'post'
class Exit(ExitPlugin):
"""
This plugin does nothing. It's only used for configuration testing.
"""
key = 'exit'
@pytest.mark.parametrize(('plugins', 'should_fail', 'should_log'), [
# No 'name' key, prebuild
({
'prebuild_plugins': [{'args': {}},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}],
},
True, # is fatal
True, # logs error
),
# No 'name' key, postbuild
({
'postbuild_plugins': [{'args': {}},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}],
},
True, # is fatal
True, # logs error
),
# No 'name' key, exit
({
'exit_plugins': [{'args': {}},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}],
},
False, # not fatal
True, # logs error
),
# No 'args' key, prebuild
({'prebuild_plugins': [{'name': 'pre'},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # no error logged
),
# No 'args' key, postbuild
({'postbuild_plugins': [{'name': 'post'},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal,
False, # no error logged
),
# No 'args' key, exit
({'exit_plugins': [{'name': 'exit'},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # no error logged
),
# No such plugin, prebuild
({'prebuild_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}]},
True, # is fatal
True, # logs error
),
# No such plugin, postbuild
({'postbuild_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}]},
True, # is fatal
True, # logs error
),
# No such plugin, exit
({'exit_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
True, # logs error
),
])
def test_plugin_errors(plugins, should_fail, should_log):
"""
Try bad plugin configuration.
"""
this_file = inspect.getfile(PreRaises)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
fake_logger = FakeLogger()
atomic_reactor.plugin.logger = fake_logger
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
plugin_files=[this_file],
**plugins)
# Find the 'watcher' parameter
watchers = [conf.get('args', {}).get('watcher')
for plugin in plugins.values()
for conf in plugin]
watcher = [x for x in watchers if x][0]
if should_fail:
with pytest.raises(PluginFailedException):
workflow.build_docker_image()
assert not watcher.was_called()
else:
workflow.build_docker_image()
assert watcher.was_called()
if should_log:
assert len(fake_logger.errors) > 0
else:
assert len(fake_logger.errors) == 0
class StopAutorebuildPlugin(PreBuildPlugin):
key = 'stopstopstop'
def run(self):
raise AutoRebuildCanceledException(self.key, 'message')
def test_autorebuild_stop_prevents_build():
"""
    test that a plugin that raises AutoRebuildCanceledException results in the build actually being skipped
"""
this_file = inspect.getfile(PreWatched)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_prepub = Watcher()
watch_post = Watcher()
watch_exit = Watcher()
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
prebuild_plugins=[{'name': 'stopstopstop',
'args': {
}}],
prepublish_plugins=[{'name': 'prepub_watched',
'args': {
'watcher': watch_prepub,
}}],
postbuild_plugins=[{'name': 'post_watched',
'args': {
'watcher': watch_post
}}],
exit_plugins=[{'name': 'exit_watched',
'args': {
'watcher': watch_exit
}}],
plugin_files=[this_file])
with pytest.raises(AutoRebuildCanceledException):
workflow.build_docker_image()
assert not watch_prepub.was_called()
assert not watch_post.was_called()
assert watch_exit.was_called()
assert workflow.autorebuild_canceled == True
@pytest.mark.parametrize('fail_at', ['pre', 'prepub', 'post', 'exit'])
def test_workflow_plugin_error(fail_at):
"""
This is a test for what happens when plugins fail.
When a prebuild or postbuild plugin fails, and doesn't have
is_allowed_to_fail=True set, the whole build should fail.
However, all the exit plugins should run.
"""
this_file = inspect.getfile(PreRaises)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_pre = Watcher()
watch_prepub = Watcher()
watch_post = Watcher()
watch_exit = Watcher()
prebuild_plugins = [{'name': 'pre_watched',
'args': {
'watcher': watch_pre,
}}]
prepublish_plugins = [{'name': 'prepub_watched',
'args': {
'watcher': watch_prepub,
}}]
postbuild_plugins = [{'name': 'post_watched',
'args': {
'watcher': watch_post
}}]
exit_plugins = [{'name': 'exit_watched',
'args': {
'watcher': watch_exit
}}]
# Insert a failing plugin into one of the build phases
if fail_at == 'pre':
prebuild_plugins.insert(0, {'name': 'pre_raises', 'args': {}})
elif fail_at == 'prepub':
prepublish_plugins.insert(0, {'name': 'prepub_raises', 'args': {}})
elif fail_at == 'post':
postbuild_plugins.insert(0, {'name': 'post_raises', 'args': {}})
elif fail_at == 'exit':
exit_plugins.insert(0, {'name': 'exit_raises', 'args': {}})
else:
# Typo in the parameter list?
assert False
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
prebuild_plugins=prebuild_plugins,
prepublish_plugins=prepublish_plugins,
postbuild_plugins=postbuild_plugins,
exit_plugins=exit_plugins,
plugin_files=[this_file])
# Failures in any phase except 'exit' cause the build process to
# abort.
if fail_at == 'exit':
workflow.build_docker_image()
else:
with pytest.raises(PluginFailedException):
workflow.build_docker_image()
# The pre-build phase should only complete if there were no
# earlier plugin failures.
assert watch_pre.was_called() == (fail_at != 'pre')
# The prepublish phase should only complete if there were no
# earlier plugin failures.
assert watch_prepub.was_called() == (fail_at not in ('pre', 'prepub'))
# The post-build phase should only complete if there were no
# earlier plugin failures.
assert watch_post.was_called() == (fail_at not in ('pre', 'prepub', 'post'))
# But all exit plugins should run, even if one of them also raises
# an exception.
assert watch_exit.was_called()
def test_workflow_docker_build_error():
"""
This is a test for what happens when the docker build fails.
"""
this_file = inspect.getfile(PreRaises)
mock_docker()
fake_builder = MockInsideBuilder(failed=True)
flexmock(InsideBuilder).new_instances(fake_builder)
watch_prepub = Watcher()
watch_post = Watcher()
watch_exit = Watcher()
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
prepublish_plugins=[{'name': 'prepub_watched',
'args': {
'watcher': watch_prepub,
}}],
postbuild_plugins=[{'name': 'post_watched',
'args': {
'watcher': watch_post
}}],
exit_plugins=[{'name': 'exit_watched',
'args': {
'watcher': watch_exit
}}],
plugin_files=[this_file])
assert workflow.build_docker_image().is_failed()
# No subsequent build phases should have run except 'exit'
assert not watch_prepub.was_called()
assert not watch_post.was_called()
assert watch_exit.was_called()
class ExitUsesSource(ExitWatched):
key = 'uses_source'
def run(self):
assert os.path.exists(self.workflow.source.get_dockerfile_path()[0])
WatchedMixIn.run(self)
def test_source_not_removed_for_exit_plugins():
this_file = inspect.getfile(PreRaises)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_exit = Watcher()
workflow = DockerBuildWorkflow(SOURCE, 'test-image',
exit_plugins=[{'name': 'uses_source',
'args': {
'watcher': watch_exit,
}}],
plugin_files=[this_file])
workflow.build_docker_image()
# Make sure that the plugin was actually run
assert watch_exit.was_called()
| bsd-3-clause | -4,677,298,146,013,047,000 | 30.159021 | 97 | 0.48464 | false |
migasfree/migasfree-backend | migasfree/client/models/error.py | 1 | 4593 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2021 Jose Antonio Chavarría <[email protected]>
# Copyright (c) 2015-2021 Alberto Gacías <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.db.models.aggregates import Count
from django.utils.translation import gettext_lazy as _
from ...core.models import Project
from .computer import Computer
from .event import Event
class DomainErrorManager(models.Manager):
def get_queryset(self):
return super().get_queryset().select_related(
'project',
'computer',
'computer__project',
'computer__sync_user',
)
def scope(self, user):
qs = self.get_queryset()
if not user.is_view_all():
qs = qs.filter(
project_id__in=user.get_projects(),
computer_id__in=user.get_computers()
)
return qs
class UncheckedManager(DomainErrorManager):
def get_queryset(self):
return super().get_queryset().filter(checked=0)
def scope(self, user):
return super().scope(user).filter(checked=0)
class ErrorManager(DomainErrorManager):
def create(self, computer, project, description):
obj = Error()
obj.computer = computer
obj.project = project
obj.description = description
obj.save()
return obj
class Error(Event):
description = models.TextField(
verbose_name=_("description"),
null=True,
blank=True
)
checked = models.BooleanField(
verbose_name=_("checked"),
default=False,
)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
verbose_name=_("project")
)
objects = ErrorManager()
unchecked = UncheckedManager()
@staticmethod
def unchecked_count(user=None):
if not user:
return Error.unchecked.count()
return Error.unchecked.scope(user).count()
@staticmethod
def unchecked_by_project(user):
total = Error.unchecked_count(user)
projects = list(Error.unchecked.scope(user).values(
'project__name',
'project__id',
'project__platform__id',
).annotate(
count=Count('id')
).order_by('project__platform__id', '-count'))
platforms = list(Error.unchecked.scope(user).values(
'project__platform__id',
'project__platform__name'
).annotate(
count=Count('id')
).order_by('project__platform__id', '-count'))
return {
'total': total,
'inner': platforms,
'outer': projects,
}
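        # Shape of the returned value (illustrative; counts and names are assumed):
        # {'total': 3,
        #  'inner': [{'project__platform__id': 1, 'project__platform__name': 'Linux', 'count': 3}],
        #  'outer': [{'project__name': 'P1', 'project__id': 1, 'project__platform__id': 1, 'count': 3}]}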
@staticmethod
def status_by_project(user):
total = Error.objects.scope(user).count()
projects = list(Error.objects.scope(user).values(
'computer__status',
'project__id',
'project__name',
).annotate(
count=Count('id')
).order_by('computer__status', '-count'))
status = list(Error.objects.scope(user).values(
'computer__status',
).annotate(
count=Count('id')
).order_by('computer__status', '-count'))
for item in status:
item['status'] = item.get('computer__status')
item['computer__status'] = _(dict(Computer.STATUS_CHOICES)[item.get('computer__status')])
return {
'total': total,
'inner': status,
'outer': projects,
}
def checked_ok(self):
self.checked = True
self.save()
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.description = self.description.replace("\r\n", "\n")
super().save(force_insert, force_update, using, update_fields)
class Meta:
app_label = 'client'
verbose_name = _('Error')
verbose_name_plural = _('Errors')
| gpl-3.0 | 349,263,582,367,790,340 | 27.515528 | 101 | 0.597691 | false |
stscieisenhamer/glue | glue/utils/tests/test_matplotlib.py | 2 | 5186 |
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.artist import Artist
from numpy.testing import assert_allclose
from matplotlib.backends.backend_agg import FigureCanvasAgg
from glue.tests.helpers import requires_scipy
from glue.utils.misc import DeferredMethod
from ..matplotlib import (point_contour, fast_limits, all_artists, new_artists,
remove_artists, view_cascade, get_extent, color2rgb,
defer_draw, freeze_margins)
@requires_scipy
class TestPointContour(object):
def test(self):
data = np.array([[0, 0, 0, 0],
[0, 2, 3, 0],
[0, 4, 2, 0],
[0, 0, 0, 0]])
xy = point_contour(2, 2, data)
x = np.array([2., 2. + 1. / 3., 2., 2., 1, .5, 1, 1, 2])
y = np.array([2. / 3., 1., 2., 2., 2.5, 2., 1., 1., 2. / 3])
np.testing.assert_array_almost_equal(xy[:, 0], x)
np.testing.assert_array_almost_equal(xy[:, 1], y)
def test_fast_limits_nans():
x = np.zeros((10, 10)) * np.nan
assert_allclose(fast_limits(x, 0, 1), [0, 1])
def test_single_value():
x = np.array([1])
assert_allclose(fast_limits(x, 5., 95.), [1, 1])
def test_artist_functions():
c1 = Circle((0, 0), radius=1)
c2 = Circle((1, 0), radius=1)
c3 = Circle((2, 0), radius=1)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.add_patch(c1)
ax.add_patch(c2)
assert all_artists(fig) == set([c1, c2])
ax.add_patch(c3)
assert new_artists(fig, set([c1, c2])) == set([c3])
remove_artists([c2])
assert all_artists(fig) == set([c1, c3])
# check that it can deal with being passed the same artist twice
remove_artists([c1, c1])
assert all_artists(fig) == set([c3])
def test_get_extent():
assert get_extent((slice(0, 5, 1), slice(0, 10, 2))) == (0, 10, 0, 5)
assert get_extent((slice(0, 5, 1), slice(0, 10, 2)), transpose=True) == (0, 5, 0, 10)
def test_view_cascade():
data = np.zeros((100, 100))
v2, view = view_cascade(data, (slice(0, 5, 1), slice(0, 5, 1)))
assert v2 == ((slice(0, 100, 20), slice(0, 100, 20)))
assert view == (slice(0, 5, 1), slice(0, 5, 1))
v2, view = view_cascade(data, (3, slice(0, 5, 1)))
assert v2 == ((3, slice(0, 100, 20)))
assert view == (3, slice(0, 5, 1))
def test_defer_draw():
@defer_draw
def draw_figure():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot([1, 2, 3], [4, 5, 6])
fig.canvas.draw()
return 3.5
result = draw_figure()
# Make sure that the return value was passed through correctly
assert result == 3.5
def test_defer_draw_exception():
# Regression test for a bug that meant that if an exception happened during
# drawing, the draw method was not restored correctly
# Make sure we start off with a clean draw method
assert not isinstance(FigureCanvasAgg.draw, DeferredMethod)
class ProblematicArtist(Artist):
def draw(self, *args, **kwargs):
raise ValueError('You shall not pass!')
@defer_draw
def draw_figure():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.add_artist(ProblematicArtist())
fig.canvas.draw()
with pytest.raises(ValueError) as exc:
result = draw_figure()
assert exc.value.args[0] == 'You shall not pass!'
# Make sure that draw is no longer a deferred method
assert not isinstance(FigureCanvasAgg.draw, DeferredMethod)
@pytest.mark.parametrize(('color', 'rgb'),
(('red', (1, 0, 0)), ('green', (0, 0.5020, 0)), ('orange', (1., 0.6470, 0.))))
def test_color2rgb(color, rgb):
assert_allclose(color2rgb(color), rgb, atol=0.001)
def test_freeze_margins():
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(1, 1, 1)
freeze_margins(ax, margins=[1, 1, 1, 1])
# Note, we don't test the following since the defaults change depending
# on the Matplotlib version
# bbox = ax.get_position()
# np.testing.assert_allclose(bbox.x0, 0.125)
# np.testing.assert_allclose(bbox.y0, 0.1)
# np.testing.assert_allclose(bbox.x1, 0.9)
# np.testing.assert_allclose(bbox.y1, 0.9)
fig.canvas.resize_event()
bbox = ax.get_position()
np.testing.assert_allclose(bbox.x0, 0.25)
np.testing.assert_allclose(bbox.y0, 0.25)
np.testing.assert_allclose(bbox.x1, 0.75)
np.testing.assert_allclose(bbox.y1, 0.75)
fig.set_size_inches(8, 8)
fig.canvas.resize_event()
bbox = ax.get_position()
np.testing.assert_allclose(bbox.x0, 0.125)
np.testing.assert_allclose(bbox.y0, 0.125)
np.testing.assert_allclose(bbox.x1, 0.875)
np.testing.assert_allclose(bbox.y1, 0.875)
ax.resizer.margins = [0, 1, 2, 4]
fig.canvas.resize_event()
bbox = ax.get_position()
np.testing.assert_allclose(bbox.x0, 0.)
np.testing.assert_allclose(bbox.y0, 0.25)
np.testing.assert_allclose(bbox.x1, 0.875)
np.testing.assert_allclose(bbox.y1, 0.5)
| bsd-3-clause | 2,965,087,516,347,311,000 | 28.299435 | 103 | 0.604126 | false |
wooey/Wooey | wooey/migrations/0018_userfile.py | 1 | 1037 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wooey.models.mixins
class Migration(migrations.Migration):
dependencies = [
('wooey', '0017_wooeyfile_generate_checksums'),
]
operations = [
migrations.CreateModel(
name='UserFile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('filename', models.TextField()),
('job', models.ForeignKey(to='wooey.WooeyJob', on_delete=models.CASCADE)),
('parameter', models.ForeignKey(blank=True, to='wooey.ScriptParameters', null=True, on_delete=models.CASCADE)),
],
bases=(wooey.models.mixins.WooeyPy2Mixin, models.Model),
),
migrations.AddField(
model_name='userfile',
name='system_file',
field=models.ForeignKey(to='wooey.WooeyFile', on_delete=models.CASCADE),
),
]
| bsd-3-clause | 1,396,863,322,107,834,600 | 33.566667 | 127 | 0.594986 | false |
jfalkner/report_data | report_data/check.py | 1 | 3654 | from datetime import datetime
from datetime import timedelta
from datetime import date
class SanityChecker:
"""Validate input from URL arguments.
This helps keep code that needs to check input succinct and consistent
across the various reports. The main purpose for encapsulating these
methods is so that errors can optionally be thrown immediately or buffered
and thrown in aggregate. The latter option is default because it is often
most helpful to see the full list of issues all at once versus, for
example, showing the first error, having a user fix it, then repeating
the cycle for all other errors hidden by the first.
"""
def __init__(self):
self.errors = []
self.raise_error = False
def reset(self):
del self.errors[:]
def if_any_errors_raise_aggregate(self):
if not self.errors:
return
error_count = len(self.errors)
error_message = ""
if error_count > 1:
error_message += '%s values need to be corrected.' % error_count
raise AssertionError('%s\n\n%s' % (error_message,
',\n'.join(self.errors)))
def add_or_raise(self, error_message):
"""Either buffer or immediately raise a filter value error."""
if self.raise_error:
raise AssertionError(error_message)
else:
if not isinstance(error_message, basestring):
raise ValueError('Error messages must be strings.')
self.errors.append(error_message)
return None
def if_required(self, name, value, required):
if required and value is None:
self.add_or_raise("Must have value for '%s'." % name)
def date(self, values, name, required=False):
# Check for non-null value.
if name not in values:
if required:
return self.add_or_raise(
'Missing ISO 8601 date value for key "%s". '
'e.g. "2013-06-01" for June 1st 2013.' % name)
else:
return
# Check for expected ISO 8601 format.
try:
date_val = values[name]
if type(date_val) == date:
return date_val
            return datetime.strptime(date_val, '%Y-%m-%d')
except ValueError:
return self.add_or_raise(
                'Invalid date value: "%s". Expected ISO 8601 format. '
                'e.g. "2013-06-01" for June 1st 2013.' % date_val)
def date_range(self, start_date, end_date):
# If no range, default to two full weeks plus days from this week.
if not start_date and not end_date:
now = datetime.now()
start_day = datetime.now() - timedelta(days=14)
while start_day.weekday() != 0:
start_day -= timedelta(days=1)
return (start_day, now)
# Sanity check both start and end dates exist.
if (start_date and not end_date):
self.add_or_raise('Found a start date but no end. Must have both '
'if you are specifying a date range.')
if (not start_date and end_date):
self.add_or_raise('Found a end date but no start. Must have both '
'if you are specifying a date range.')
# Sanity check that the start is before end date.
if start_date and end_date:
if end_date <= start_date:
self.add_or_raise(
'End date "%s" must be after start date "%s".' %
(start_date, end_date))
return start_date, end_date
| mit | 3,428,162,843,007,612,000 | 40.05618 | 78 | 0.574165 | false |
CMacKinnon101/pokemon-python-api-adapter | get_cards_from_sets.py | 1 | 3637 | #Modules
import configparser
from pokemontcgsdk import Set
from pokemontcgsdk import Card
from pymongo import MongoClient
#Config
Config = configparser.ConfigParser()
Config.read("settings.ini")
host = Config.get("db", "host")
port = Config.get("db", "port")
user = Config.get("db", "user")
password = Config.get("db", "password")
pass_colon_str = ""
at_str = ""
if user:
pass_colon_str = ":"
at_str = "@"
#Build Connection String
connection_string = "mongodb://{0}{1}{2}{3}{4}:{5}".format(user, pass_colon_str, password, at_str, host, port)
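# With assumed settings values this produces, for example:
#   user set:  "mongodb://admin:secret@localhost:27017"
#   no user:   "mongodb://localhost:27017"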
#Get the sets from the pokemontcg api
print("Getting sets from pokemontcgsdk")
pokemontcgapi_sets = Set.all()
print(" Found sets:")
for pokemontcgapi_set in pokemontcgapi_sets:
print(" -- {0}".format(pokemontcgapi_set.name))
#Connect to Mongo
print("Connecting to {0}".format(connection_string))
mongo_client = MongoClient(connection_string)
#Get the Database Object
card_data_database = mongo_client.card_data
sets_collection = card_data_database.sets
cards_collection = card_data_database.cards
#Get all the sets that we already have cards for
print("\nGetting sets from {0}".format(host))
# Materialize the cursor into a list so it can be re-iterated for every API set
# below (a pymongo cursor is exhausted after a single pass).
mongo_sets = list(sets_collection.find())
#For each card, insert a document into mongo
print("\nInserting Cards into mongo")
for pokemontcgapi_set in pokemontcgapi_sets:
already_have_set = False
print("Checking for {0}({1})".format(pokemontcgapi_set.name, pokemontcgapi_set.code))
    for mongo_set in mongo_sets:
if mongo_set.get('code') == pokemontcgapi_set.code:
already_have_set = True
print("Skipping {0}({1})".format(mongo_set.get('name'), mongo_set.get('code')))
break
if not already_have_set:
print("\nInserting {0}:".format(pokemontcgapi_set.name))
print("***********************************")
#Get the cards from the set
cards = Card.where(setCode=pokemontcgapi_set.code).all()
#Insert each card document into mongo
for card in cards:
print("-- {0}({1})".format(card.name, card.id))
cards_collection.insert_one({
"pokemontcgapi_id": card.id,
"name": card.name,
"national_pokedex_number": card.national_pokedex_number,
"image_url": card.image_url,
"subtype": card.subtype,
"supertype": card.supertype,
"ability": card.ability,
"ancient_trait": card.ancient_trait,
"hp": card.hp,
"number": card.number,
"artist": card.artist,
"rarity": card.rarity,
"series": card.series,
"set": card.set,
"set_code": card.set_code,
"retreat_cost": card.retreat_cost,
"text": card.text,
"types": card.types,
"attacks": card.attacks,
"weaknesses": card.weaknesses,
"resistances": card.resistances
})
sets_collection.insert_one({
"code": pokemontcgapi_set.code,
"name": pokemontcgapi_set.name,
"series": pokemontcgapi_set.series,
"total_cards": pokemontcgapi_set.total_cards,
"standard_legal": pokemontcgapi_set.standard_legal,
"expanded_legal": pokemontcgapi_set.expanded_legal,
"release_date": pokemontcgapi_set.release_date
})
print("Finished inserting {0}({1})\n\n".format(pokemontcgapi_set.name, pokemontcgapi_set.code))
print("\nClosing connection to {0}".format(host))
mongo_client.close() | mit | 3,130,499,382,758,920,000 | 35.38 | 110 | 0.606819 | false |
michaldz44/pyG-Attract | golem.py | 1 | 3778 | import math
import pdb
class Golem(object):
def __init__(self, x, y, args, attractors,golem_number):
self.attractors=attractors
self.args=args
self.position=complex(x,y)
self.velocity=complex(0,0)
#self.acceleration_previous=self.attractors.get_force(self.position,self.velocity)
self.acceleration_previous=0
self.final_attractor=None
self.energy=self.get_energy()
self.golem_number=golem_number
self.state=[]
def move(self):
# step
        absv = abs(self.velocity)
        if absv > 1:
            dt = self.args.dt / absv
        else:
            dt = self.args.dt
acceleration_current=self.attractors.get_force(self.position,self.velocity)
# let's ty to be accurate apply Beeman-Schofield algoritm
#
# position=\
# self.position+\
# self.velocity*dt+\
# dt*dt*(4*acceleration_current-self.acceleration_previous)/6.0
#
# v_predict=\
# self.velocity+\
# dt*(3*acceleration_current-self.acceleration_previous)/2.0
#
# acceleration_future=self.attractors.get_force(position,v_predict)
#
# self.velocity+=dt*(2*acceleration_future+5*acceleration_current-self.acceleration_previous)/6.0
#
# self.acceleration_previous=acceleration_current
# self.position=position
# Euler-Cromer fast simplified version
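        # i.e. the semi-implicit (Euler-Cromer) update:
        #   v_{n+1} = v_n + a(x_n, v_n) * dt
        #   x_{n+1} = x_n + v_{n+1} * dt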
self.velocity+=acceleration_current*dt
self.position+=self.velocity*dt
if (self.energy-self.attractors.get_potencial(self.position))>0:
v=math.sqrt(2*(self.energy-self.attractors.get_potencial(self.position)))
else:
print("drag problem - velocity anihilated",self.golem_number,abs(self.velocity))
if abs(self.velocity)>0.1:
pdb.set_trace()
v=0.000001
#v=-math.sqrt(-2*(self.energy-self.attractors.get_potencial(self.position)))
absv=abs(self.velocity)
self.velocity=v*self.velocity/absv
#self.q=v/absv
self.energy-=dt*self.args.mu*absv*absv
#
# self.state.append((
# abs(self.velocity),
# self.attractors.get_potencial(self.position),
# self.energy,
# dt
# ))
#self.vpredict = self.velocity+ (3.0*self.acceleration0 - self.acceleration1)*dt/2.0
#self.acceleration2 += self.attractors.get_force(self.position,self.vpredict)
#self.acceleration2 += self.position - self.args.mu*self.vpredict
#self.velocity += (2.0*self.acceleration2+5.0*self.acceleration0 - self.acceleration1)*dt/6.0
#self.acceleration1 = self.acceleration0
#self.acceleration0 = self.acceleration2
def get_energy(self):
#print(self.attractors.get_potencial(self.position))
return self.attractors.get_potencial(self.position)+abs(self.velocity)**2/2.0
def do_move(self):
if self.final_attractor:
return False
self.move()
self.end_check()
return True
def get_color(self):
if self.final_attractor:
return self.final_attractor["color"]
def end_check(self):
# if final attrator is set we are fixed (attracted)
if self.attractors.min_distance(self.position) < self.args.pot_d and abs(self.velocity) < self.args.term_v: # close to the city and low velocity
self.final_attractor=self.attractors.min_attractor(self.position)
return True
if self.energy<self.attractors.min_attractor(self.position)["esc_energy"]:
self.final_attractor=self.attractors.min_attractor(self.position)
return True
return False
| gpl-2.0 | 6,252,138,201,953,294,000 | 35.679612 | 153 | 0.618052 | false |
KnowledgeLinks/rdfframework | rdfframework/utilities/valuecalculator.py | 1 | 1389 | import datetime
import pytz
# try:
# from rdfframework.utilities import iri, uri
# except ImportError:
# # Try Local Import
# from . import iri, uri
# def calculate_default_value(field):
# '''calculates the default value based on the field default input'''
# _calculation_string = field.get("kds_defaultVal", field.get("defaultVal"))
# _return_val = None
# if _calculation_string is None:
# return None
# if _calculation_string.startswith("item_class"):
# _return_val = iri(uri(field.get("kds_classUri",field.get("classUri"))))
# else:
# _calc_params = _calculation_string.split('+')
# _base = _calc_params[0].strip()
# if len(_calc_params) > 1:
# _add_value = float(_calc_params[1].strip())
# else:
# _add_value = 0
# if _base == 'today':
# _return_val = datetime.datetime.utcnow().replace(tzinfo = pytz.utc).date() +\
# datetime.timedelta(days=_add_value)
# elif _base == 'now':
# _return_val = datetime.datetime.utcnow().replace(tzinfo = pytz.utc) +\
# datetime.timedelta(days=_add_value)
# elif _base == 'time':
# _return_val = datetime.datetime.utcnow().replace(tzinfo = pytz.utc).time() +\
# datetime.timedelta(days=_add_value)
# return _return_val
| mit | -6,869,284,743,283,799,000 | 39.852941 | 91 | 0.572354 | false |
aliunsal/blog | Blogs/models.py | 1 | 1247 | from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
class Comment(models.Model):
content = models.TextField(null=False)
date = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, default=0)
approved = models.BooleanField(default=False)
activation_key = models.TextField(max_length=150)
email = models.EmailField(null=False)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
comment = GenericRelation("Comment")
def __unicode__(self):
return self.content
class Post(models.Model):
title = models.CharField(max_length=300)
content = models.TextField(null=True)
date = models.DateTimeField(auto_now_add=True)
picture = models.ImageField(upload_to='static/img/post_image/',
null=True)
author = models.ForeignKey(User)
comment = GenericRelation(Comment)
def __unicode__(self):
return self.title
class Meta:
ordering = ["-id"] | gpl-2.0 | 1,953,466,976,391,067,100 | 31.842105 | 82 | 0.705694 | false |
botswana-harvard/edc-visit-tracking | edc_visit_tracking/form_validators/visit_form_validator.py | 1 | 4772 | from django import forms
from edc_constants.constants import OTHER, ALIVE, DEAD, YES, UNKNOWN
from edc_constants.constants import PARTICIPANT, NO
from edc_form_validators import FormValidator
from edc_form_validators.base_form_validator import REQUIRED_ERROR,\
INVALID_ERROR
from ..constants import MISSED_VISIT, LOST_VISIT, UNSCHEDULED
from ..visit_sequence import VisitSequence, VisitSequenceError
class VisitFormValidator(FormValidator):
visit_sequence_cls = VisitSequence
participant_label = 'participant'
def clean(self):
appointment = self.cleaned_data.get('appointment')
if not appointment:
raise forms.ValidationError({
'appointment': 'This field is required'},
code=REQUIRED_ERROR)
visit_sequence = self.visit_sequence_cls(appointment=appointment)
try:
visit_sequence.enforce_sequence()
except VisitSequenceError as e:
raise forms.ValidationError(e, code=INVALID_ERROR)
self.validate_visit_code_sequence_and_reason()
self.validate_presence()
self.validate_survival_status_if_alive()
self.validate_reason_and_info_source()
self.validate_required_fields()
def validate_visit_code_sequence_and_reason(self):
appointment = self.cleaned_data.get('appointment')
reason = self.cleaned_data.get('reason')
if appointment:
if (not appointment.visit_code_sequence
and reason == UNSCHEDULED):
raise forms.ValidationError({
'reason': 'Invalid. This is not an unscheduled visit'},
code=INVALID_ERROR)
if (appointment.visit_code_sequence
and reason != UNSCHEDULED):
raise forms.ValidationError({
'reason': 'Invalid. This is an unscheduled visit'},
code=INVALID_ERROR)
def validate_reason_and_info_source(self):
cleaned_data = self.cleaned_data
condition = cleaned_data.get('reason') != MISSED_VISIT
self.required_if_true(
condition,
field_required='info_source',
required_msg='Provide source of information.')
def validate_survival_status_if_alive(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('survival_status') in [ALIVE, DEAD]:
if not cleaned_data.get('last_alive_date'):
raise forms.ValidationError(
{'last_alive_date':
f'Provide date {self.participant_label} last known alive.'})
def validate_presence(self):
"""Raise an exception if 'is_present' does not make sense relative to
'survival status', 'reason' and 'info_source'."""
cleaned_data = self.cleaned_data
if cleaned_data.get('is_present') == YES:
if cleaned_data.get('survival_status') in [UNKNOWN, DEAD]:
raise forms.ValidationError(
{'survival_status':
'Survival status cannot be \'{survival_status}\' if '
'{participant} is present.'.format(
survival_status=cleaned_data.get(
'survival_status').lower(),
participant=self.participant_label)})
if cleaned_data.get('reason') in [MISSED_VISIT, LOST_VISIT]:
raise forms.ValidationError(
{'reason':
'You indicated that the reason for the visit report is '
'{reason} but also that the {participant} is present. '
'Please correct.'.format(
participant=self.participant_label,
reason=cleaned_data.get('reason'))})
elif cleaned_data.get('is_present') == NO:
if cleaned_data.get('info_source') == PARTICIPANT:
raise forms.ValidationError(
{'info_source': 'Source of information cannot be from '
'{participant} if {participant} is not present.'.format(
participant=self.participant_label)})
def validate_required_fields(self):
self.required_if(
MISSED_VISIT,
field='reason',
field_required='reason_missed')
self.required_if(
UNSCHEDULED,
field='reason',
field_required='reason_unscheduled')
self.required_if(
OTHER,
field='info_source',
field_required='info_source_other')
self.required_if(
OTHER,
field='reason_unscheduled',
field_required='reason_unscheduled_other')
| gpl-2.0 | 4,630,544,670,171,520,000 | 38.438017 | 81 | 0.582775 | false |
pyload/pyload | src/pyload/plugins/downloaders/ZDF.py | 1 | 2269 | # -*- coding: utf-8 -*-
import re
import json
import os
from pyload.core.network.request_factory import get_url
import xml.etree.ElementTree as etree
import pycurl
from ..base.downloader import BaseDownloader
# Based on zdfm by Roland Beermann (http://github.com/enkore/zdfm/)
class ZDF(BaseDownloader):
__name__ = "ZDF Mediathek"
__type__ = "downloader"
__version__ = "0.92"
__status__ = "testing"
__pattern__ = r"https://(?:www\.)?zdf\.de/(?P<ID>[/\w-]+)\.html"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """ZDF.de downloader plugin"""
__license__ = "GPLv3"
__authors__ = []
def process(self, pyfile):
self.data = self.load(pyfile.url)
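        # Flow of the code below: scrape the player API token from the page,
        # fetch the content document for the video id, resolve its stream list id,
        # then take the first listed quality/track as the download URI.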
try:
api_token = re.search(
r'window\.zdfsite\.player\.apiToken = "([\d\w]+)";', self.data
).group(1)
self.req.http.c.setopt(pycurl.HTTPHEADER, ["Api-Auth: Bearer " + api_token])
id = re.match(self.__pattern__, pyfile.url).group("ID")
filename = json.loads(
self.load(
"https://api.zdf.de/content/documents/zdf/" + id + ".json",
get={"profile": "player-3"},
)
)
stream_list = filename["mainVideoContent"]["http://zdf.de/rels/target"][
"streams"
]["default"]["extId"]
streams = json.loads(
self.load(
"https://api.zdf.de/tmd/2/ngplayer_2_4/vod/ptmd/mediathek/"
+ stream_list
)
)
download_name = streams["priorityList"][0]["formitaeten"][0]["qualities"][
0
]["audio"]["tracks"][0]["uri"]
self.pyfile.name = os.path.basename(id) + os.path.splitext(download_name)[1]
self.download(download_name)
except Exception as exc:
self.log_error(exc)
| agpl-3.0 | -6,561,568,899,936,143,000 | 32.865672 | 88 | 0.527545 | false |
seleniumbase/SeleniumBase | seleniumbase/core/download_helper.py | 1 | 2057 | import os
import shutil
import time
from seleniumbase.config import settings
from seleniumbase.fixtures import constants
# The "downloads_folder" is a folder for saving downloaded files.
# Works for downloads initiated by Chromium and Firefox WebDriver clicks.
# Browser type doesn't matter if using self.download_file(file_url)
# or self.save_file_as(file_url, new_file_name)
# The "downloads_folder" is cleaned out at the start of each pytest run,
# but there is an option to save existing files in "archived_files".
DOWNLOADS_DIR = constants.Files.DOWNLOADS_FOLDER
ARCHIVE_DIR = constants.Files.ARCHIVED_DOWNLOADS_FOLDER
abs_path = os.path.abspath(".")
downloads_path = os.path.join(abs_path, DOWNLOADS_DIR)
def get_downloads_folder():
return downloads_path
def reset_downloads_folder():
"""Clears the downloads folder.
If settings.ARCHIVE_EXISTING_DOWNLOADS is set to True, archives it."""
if os.path.exists(downloads_path) and not os.listdir(downloads_path) == []:
archived_downloads_folder = os.path.join(
downloads_path, "..", ARCHIVE_DIR
)
reset_downloads_folder_assistant(archived_downloads_folder)
def reset_downloads_folder_assistant(archived_downloads_folder):
if not os.path.exists(archived_downloads_folder):
try:
os.makedirs(archived_downloads_folder)
except Exception:
pass # Should only be reachable during multi-threaded test runs
new_archived_downloads_sub_folder = "%s/downloads_%s" % (
archived_downloads_folder,
int(time.time()),
)
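    # e.g. "<archived_downloads_folder>/downloads_1610000000" (timestamp illustrative)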
if os.path.exists(downloads_path):
if not os.listdir(downloads_path) == []:
try:
shutil.move(downloads_path, new_archived_downloads_sub_folder)
os.makedirs(downloads_path)
except Exception:
pass
if not settings.ARCHIVE_EXISTING_DOWNLOADS:
try:
shutil.rmtree(new_archived_downloads_sub_folder)
except OSError:
pass
| mit | -4,152,373,432,248,076,000 | 36.4 | 79 | 0.674283 | false |
CountZer0/PipelineConstructionSet | python/maya/site-packages/pymel-1.0.5/pymel/core/nodetypes.py | 1 | 123778 | """
Contains classes corresponding to the Maya type hierarchy, including `DependNode`, `Transform`, `Mesh`, and `Camera`.
"""
import sys, os, re
import inspect, itertools, math
import pymel.util as _util
import pymel.internal.pmcmds as cmds #@UnresolvedImport
import pymel.internal.factories as _factories
import pymel.api as _api #@UnresolvedImport
import pymel.internal.apicache as _apicache
import pymel.internal.pwarnings as _warnings
from pymel.internal import getLogger as _getLogger
import datatypes
_logger = _getLogger(__name__)
# to make sure Maya is up
import pymel.internal as internal
import pymel.versions as versions
from maya.cmds import about as _about
import maya.mel as mm
#from general import *
import general
import other
from animation import listAnimatable as _listAnimatable
from system import namespaceInfo as _namespaceInfo, FileReference as _FileReference
_thisModule = sys.modules[__name__]
#__all__ = ['Component', 'MeshEdge', 'MeshVertex', 'MeshFace', 'Attribute', 'DependNode' ]
## Mesh Components
# If we're reloading, clear the pynode types out
_factories.clearPyNodeTypes()
class DependNode( general.PyNode ):
__apicls__ = _api.MFnDependencyNode
__metaclass__ = _factories.MetaMayaNodeWrapper
#-------------------------------
# Name Info and Manipulation
#-------------------------------
# def __new__(cls,name,create=False):
# """
# Provides the ability to create the object when creating a class
#
# >>> n = pm.Transform("persp",create=True)
# >>> n.__repr__()
# # Result: nt.Transform(u'persp1')
# """
# if create:
# ntype = cls.__melnode__
# name = createNode(ntype,n=name,ss=1)
# return general.PyNode.__new__(cls,name)
# def __init__(self, *args, **kwargs ):
# self.apicls.__init__(self, self._apiobject.object() )
@_util.universalmethod
def __melobject__(self):
"""Special method for returning a mel-friendly representation."""
if isinstance(self, DependNode):
# For instance, return the node's name...
return self.name()
else:
# For the class itself, return the mel node name
return self.__melnode__
def __repr__(self):
"""
:rtype: `unicode`
"""
return u"nt.%s(%r)" % (self.__class__.__name__, self.name())
def _updateName(self) :
# test validity
self.__apimobject__()
self._name = self.__apimfn__().name()
return self._name
def name(self, update=True, stripNamespace=False) :
"""
:rtype: `unicode`
"""
if update or self._name is None:
try:
self._updateName()
except general.MayaObjectError:
_logger.warn( "object %s no longer exists" % self._name )
name = self._name
if stripNamespace:
name = name.rsplit(':', 1)[-1]
return name
def shortName(self):
"""
This produces the same results as `DependNode.name` and is included to simplify looping over lists
of nodes that include both Dag and Depend nodes.
:rtype: `unicode`
"""
return self.name()
def longName(self):
"""
This produces the same results as `DependNode.name` and is included to simplify looping over lists
of nodes that include both Dag and Depend nodes.
:rtype: `unicode`
"""
return self.name()
def nodeName(self, **kwargs):
"""
This produces the same results as `DependNode.name` and is included to simplify looping over lists
of nodes that include both Dag and Depend nodes.
:rtype: `unicode`
"""
return self.name(**kwargs)
#rename = rename
def rename( self, name, **kwargs ):
"""
:rtype: `DependNode`
"""
#self.setName( name ) # no undo support
#check for preserveNamespace a pymel unique flag
if kwargs.pop('preserveNamespace', False):
name = self.namespace(root=True) + name
#ensure shortname
if '|' in name:
name = name.split('|')[-1]
return general.rename(self, name, **kwargs)
def __apiobject__(self) :
"get the default API object (MObject) for this node if it is valid"
return self.__apimobject__()
def __apimobject__(self) :
"get the MObject for this node if it is valid"
handle = self.__apihandle__()
if _api.isValidMObjectHandle( handle ) :
return handle.object()
raise general.MayaNodeError( self._name )
def __apihandle__(self) :
return self.__apiobjects__['MObjectHandle']
def __str__(self):
return "%s" % self.name()
def __unicode__(self):
return u"%s" % self.name()
if versions.current() >= versions.v2009:
def __hash__(self):
return self.__apihandle__().hashCode()
def node(self):
"""for compatibility with Attribute class
:rtype: `DependNode`
"""
return self
#--------------------------
# Modification
#--------------------------
def lock( self, **kwargs ):
'lockNode -lock 1'
#kwargs['lock'] = True
#kwargs.pop('l',None)
#return cmds.lockNode( self, **kwargs)
return self.setLocked( True )
def unlock( self, **kwargs ):
'lockNode -lock 0'
#kwargs['lock'] = False
#kwargs.pop('l',None)
#return cmds.lockNode( self, **kwargs)
return self.setLocked( False )
def cast( self, swapNode, **kwargs):
"""nodeCast"""
        return cmds.nodeCast( self, swapNode, **kwargs )
duplicate = general.duplicate
#--------------------------
#xxx{ Presets
#--------------------------
def savePreset(self, presetName, custom=None, attributes=[]):
kwargs = {'save':True}
if attributes:
kwargs['attributes'] = ' '.join(attributes)
if custom:
kwargs['custom'] = custom
return cmds.nodePreset( presetName, **kwargs)
def loadPreset(self, presetName):
kwargs = {'load':True}
return cmds.nodePreset( presetName, **kwargs)
def deletePreset(self, presetName):
kwargs = {'delete':True}
return cmds.nodePreset( presetName, **kwargs)
def listPresets(self):
kwargs = {'list':True}
return cmds.nodePreset( **kwargs)
#}
#--------------------------
#xxx{ Info
#--------------------------
type = general.nodeType
def referenceFile(self):
"""referenceQuery -file
Return the reference file to which this object belongs. None if object is not referenced
:rtype: `FileReference`
"""
try:
return _FileReference( cmds.referenceQuery( self, f=1) )
except RuntimeError:
None
isReadOnly = _factories.wrapApiMethod( _api.MFnDependencyNode, 'isFromReferencedFile', 'isReadOnly' )
def classification(self):
'getClassification'
return general.getClassification( self.type() )
#return self.__apimfn__().classification( self.type() )
#}
#--------------------------
#xxx{ Connections
#--------------------------
def inputs(self, **kwargs):
"""listConnections -source 1 -destination 0
:rtype: `PyNode` list
"""
kwargs['source'] = True
kwargs.pop('s', None )
kwargs['destination'] = False
kwargs.pop('d', None )
return general.listConnections(self, **kwargs)
def outputs(self, **kwargs):
"""listConnections -source 0 -destination 1
:rtype: `PyNode` list
"""
kwargs['source'] = False
kwargs.pop('s', None )
kwargs['destination'] = True
kwargs.pop('d', None )
return general.listConnections(self, **kwargs)
def sources(self, **kwargs):
"""listConnections -source 1 -destination 0
:rtype: `PyNode` list
"""
kwargs['source'] = True
kwargs.pop('s', None )
kwargs['destination'] = False
kwargs.pop('d', None )
return general.listConnections(self, **kwargs)
def destinations(self, **kwargs):
"""listConnections -source 0 -destination 1
:rtype: `PyNode` list
"""
kwargs['source'] = False
kwargs.pop('s', None )
kwargs['destination'] = True
kwargs.pop('d', None )
return general.listConnections(self, **kwargs)
def shadingGroups(self):
"""list any shading groups in the future of this object - works for
shading nodes, transforms, and shapes
Also see listSets(type=1) - which returns which 'rendering sets' the
object is a member of (and 'rendering sets' seem to consist only of
shading groups), whereas this method searches the object's future for
any nodes of type 'shadingEngine'.
:rtype: `DependNode` list
"""
return self.future(type='shadingEngine')
#}
#--------------------------
#xxx{ Attributes
#--------------------------
def __getattr__(self, attr):
try :
return getattr(super(general.PyNode, self), attr)
except AttributeError :
try:
return DependNode.attr(self,attr)
except general.MayaAttributeError, e:
# since we're being called via __getattr__ we don't know whether the user was intending
# to get a class method or a maya attribute, so we raise a more generic AttributeError
raise AttributeError,"%r has no attribute or method named '%s'" % (self, attr)
@_util.universalmethod
def attrDefaults(obj, attr): #@NoSelf
"""
Access to an attribute of a node. This does not require an instance:
>>> nt.Transform.attrDefaults('tx').isKeyable()
True
but it can use one if needed ( for example, for dynamically created attributes )
        >>> nt.Transform(u'persp').attrDefaults('tx').isKeyable()
        True
Note: this is still experimental.
"""
if inspect.isclass(obj):
self = None
cls = obj # keep things familiar
else:
self = obj # keep things familiar
cls = type(obj)
attributes = cls.__apiobjects__.setdefault('MFnAttributes', {})
attrObj = attributes.get(attr, None)
if not _api.isValidMObject(attrObj):
def toAttrObj(apiObj):
try:
attrObj = apiObj.attribute(attr)
if attrObj.isNull():
raise RuntimeError
except RuntimeError:
# just try it first, then check if it has the attribute if
# we errored (as opposed to always check first if the node
# has the attribute), on the assumption that this will be
# "faster" for most cases, where the node actually DOES have
# the attribute...
if not apiObj.hasAttribute(attr):
raise general.MayaAttributeError('%s.%s' % (cls.__melnode__, attr))
else:
# don't know why we got this error, so just reraise
raise
return attrObj
if self is None:
if hasattr(_api, 'MNodeClass'):
# Yay, we have MNodeClass, use it!
nodeCls = _api.MNodeClass(cls.__melnode__)
attrObj = toAttrObj(nodeCls)
else:
# We don't have an instance of the node, we need
# to make a ghost one...
with _apicache._GhostObjMaker(cls.__melnode__) as nodeObj:
if nodeObj is None:
# for instance, we get this if we have an abstract class...
raise RuntimeError("Unable to get attribute defaults for abstract node class %s, in versions prior to 2012" % cls.__melnode__)
nodeMfn = cls.__apicls__(nodeObj)
attrObj = toAttrObj(nodeMfn)
else:
nodeMfn = self.__apimfn__()
attrObj = toAttrObj(nodeMfn)
attributes[attr] = attrObj
return general.AttributeDefaults( attrObj )
def attr(self, attr):
"""
access to attribute plug of a node. returns an instance of the Attribute class for the
given attribute name.
:rtype: `Attribute`
"""
return self._attr(attr, False)
# Just have this alias because it will sometimes return attributes for an
# underlying shape, which we may want for DagNode.attr, but don't want for
# DependNode.attr (and using the on-shape result, instead of throwing it
# away and then finding it again on the shape, saves time for the DagNode
# case)
def _attr(self, attr, allowOtherNode):
#return Attribute( '%s.%s' % (self, attr) )
try :
if '.' in attr or '[' in attr:
# Compound or Multi Attribute
# there are a couple of different ways we can proceed:
# Option 1: back out to _api.toApiObject (via general.PyNode)
# return Attribute( self.__apiobject__(), self.name() + '.' + attr )
# Option 2: nameparse.
# this avoids calling self.name(), which can be slow
import pymel.util.nameparse as nameparse
nameTokens = nameparse.getBasicPartList( 'dummy.' + attr )
result = self.__apiobject__()
for token in nameTokens[1:]: # skip the first, bc it's the node, which we already have
if isinstance( token, nameparse.MayaName ):
if isinstance( result, _api.MPlug ):
# you can't get a child plug from a multi/array plug.
# if result is currently 'defaultLightList1.lightDataArray' (an array)
# and we're trying to get the next plug, 'lightDirection', then we need a dummy index.
                            # the following line will result in 'defaultLightList1.lightDataArray[-1].lightDirection'
if result.isArray():
result = self.__apimfn__().findPlug( unicode(token) )
else:
result = result.child( self.__apimfn__().attribute( unicode(token) ) )
else: # Node
result = self.__apimfn__().findPlug( unicode(token) )
# # search children for the attribute to simulate cam.focalLength --> perspShape.focalLength
# except TypeError:
# for i in range(fn.childCount()):
# try: result = _api.MFnDagNode( fn.child(i) ).findPlug( unicode(token) )
# except TypeError: pass
# else:break
if isinstance( token, nameparse.NameIndex ):
if token.value != -1:
result = result.elementByLogicalIndex( token.value )
plug = result
else:
try:
plug = self.__apimfn__().findPlug( attr, False )
except RuntimeError:
# Don't use .findAlias, as it always returns the 'base'
# attribute - ie, if the alias is to foo[0].bar, it will
# just point to foo
# aliases
#obj = _api.MObject()
#self.__apimfn__().findAlias( attr, obj )
#plug = self.__apimfn__().findPlug( obj, False )
# the following technique gets aliased attributes as well. turning dagPlugs to off saves time because we already
# know the dagNode. however, certain attributes, such as rotatePivot, are detected as components,
# despite the fact that findPlug finds them as MPlugs. need to look into this
# TODO: test speed versus above method
try:
plug = _api.toApiObject(self.name() + '.' + attr, dagPlugs=False)
except RuntimeError:
raise
if not isinstance(plug, _api.MPlug):
raise RuntimeError
if not (allowOtherNode or plug.node() == self.__apimobject__()):
# we could have gotten an attribute on a shape object,
# which we don't want
raise RuntimeError
return general.Attribute( self.__apiobject__(), plug )
except RuntimeError:
# raise our own MayaAttributeError, which subclasses AttributeError and MayaObjectError
raise general.MayaAttributeError( '%s.%s' % (self, attr) )
hasAttr = general.hasAttr
@_factories.addMelDocs('setAttr')
def setAttr( self, attr, *args, **kwargs):
# for now, using strings is better, because there is no MPlug support
return general.setAttr( "%s.%s" % (self, attr), *args, **kwargs )
@_factories.addMelDocs('setAttr')
def setDynamicAttr( self, attr, *args, **kwargs):
"""
same as `DependNode.setAttr` with the force flag set to True. This causes
the attribute to be created based on the passed input value.
"""
# for now, using strings is better, because there is no MPlug support
kwargs['force'] = True
return general.setAttr( "%s.%s" % (self, attr), *args, **kwargs )
@_factories.addMelDocs('getAttr')
def getAttr( self, attr, *args, **kwargs ):
# for now, using strings is better, because there is no MPlug support
return general.getAttr( "%s.%s" % (self, attr), *args, **kwargs )
@_factories.addMelDocs('addAttr')
def addAttr( self, attr, **kwargs):
# for now, using strings is better, because there is no MPlug support
assert 'longName' not in kwargs and 'ln' not in kwargs
kwargs['longName'] = attr
return general.addAttr( unicode(self), **kwargs )
@_factories.addMelDocs('deleteAttr')
def deleteAttr( self, attr, *args, **kwargs ):
# for now, using strings is better, because there is no MPlug support
return general.deleteAttr( "%s.%s" % (self, attr), *args, **kwargs )
@_factories.addMelDocs('connectAttr')
def connectAttr( self, attr, destination, **kwargs ):
# for now, using strings is better, because there is no MPlug support
return general.connectAttr( "%s.%s" % (self, attr), destination, **kwargs )
@_factories.addMelDocs('disconnectAttr')
def disconnectAttr( self, attr, destination=None, **kwargs ):
# for now, using strings is better, because there is no MPlug support
return general.disconnectAttr( "%s.%s" % (self, attr), destination, **kwargs )
listAnimatable = _listAnimatable
def listAttr( self, **kwargs):
"""
listAttr
Modifications:
- returns an empty list when the result is None
- added 'alias' keyword to list attributes that have aliases
:rtype: `Attribute` list
"""
alias = kwargs.pop('alias', False)
# stringify fix
res = map( lambda x: self.attr(x), _util.listForNone(cmds.listAttr(self.name(), **kwargs)))
if alias:
res = [ x[1] for x in self.listAliases() if x[1] in res]
# aliases = dict( (x[1], x[0]) for x in general.aliasAttr(self.name()) )
# tmp = res
# res = []
# for at in tmp:
# try:
# res.append( aliases[at], at )
# except KeyError:
# pass
return res
def listAliases( self ):
"""
aliasAttr
Modifications:
- returns an empty list when the result is None
- when queried, returns a list of (alias, `Attribute`) pairs.
:rtype: (`str`, `Attribute`) list
"""
#tmp = _util.listForNone(cmds.aliasAttr(self.name(),query=True))
tmp = []
self.__apimfn__().getAliasList(tmp)
res = []
for i in range(0,len(tmp),2):
res.append((tmp[i], general.Attribute(self.node() + '.' + tmp[i+1])))
return res
def attrInfo( self, **kwargs):
"""attributeInfo
:rtype: `Attribute` list
"""
# stringify fix
return map( lambda x: self.attr(x) , _util.listForNone(cmds.attributeInfo(self.name(), **kwargs)))
#}
#-----------------------------------------
#xxx{ Name Info and Manipulation
#-----------------------------------------
# Now just wraps NameParser functions
def stripNum(self):
"""Return the name of the node with trailing numbers stripped off. If no trailing numbers are found
the name will be returned unchanged.
>>> from pymel.core import *
>>> SCENE.lambert1.stripNum()
u'lambert'
:rtype: `unicode`
"""
return other.NameParser(self).stripNum()
def extractNum(self):
"""Return the trailing numbers of the node name. If no trailing numbers are found
an error will be raised.
>>> from pymel.core import *
>>> SCENE.lambert1.extractNum()
u'1'
:rtype: `unicode`
"""
return other.NameParser(self).extractNum()
def nextUniqueName(self):
"""Increment the trailing number of the object until a unique name is found
If there is no trailing number, appends '1' to the name.
:rtype: `unicode`
"""
return other.NameParser(self).nextUniqueName()
def nextName(self):
"""Increment the trailing number of the object by 1
Raises an error if the name has no trailing number.
>>> from pymel.core import *
>>> SCENE.lambert1.nextName()
DependNodeName(u'lambert2')
:rtype: `unicode`
"""
return other.NameParser(self).nextName()
def prevName(self):
"""Decrement the trailing number of the object by 1
Raises an error if the name has no trailing number.
:rtype: `unicode`
"""
return other.NameParser(self).prevName()
@classmethod
def registerVirtualSubClass( cls, nameRequired=False ):
"""
Deprecated
"""
_factories.registerVirtualClass(cls, nameRequired)
#}
if versions.current() >= versions.v2011:
class ContainerBase(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
pass
class Entity(ContainerBase):
__metaclass__ = _factories.MetaMayaNodeWrapper
pass
else:
class Entity(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
pass
class DagNode(Entity):
#:group Path Info and Modification: ``*parent*``, ``*Parent*``, ``*child*``, ``*Child*``
"""
"""
__apicls__ = _api.MFnDagNode
__metaclass__ = _factories.MetaMayaNodeWrapper
# def __init__(self, *args, **kwargs ):
# self.apicls.__init__(self, self.__apimdagpath__() )
_componentAttributes = {}
def comp(self, compName):
"""
Will retrieve a Component object for this node; similar to
DependNode.attr(), but for components.
:rtype: `Component`
"""
if compName in self._componentAttributes:
compClass = self._componentAttributes[compName]
if isinstance(compClass, tuple):
# We have something like:
# 'uIsoparm' : (NurbsSurfaceIsoparm, 'u')
# need to specify what 'flavor' of the basic
# component we need...
return compClass[0](self, {compClass[1]:general.ComponentIndex(label=compClass[1])})
else:
return compClass(self)
# if we do self.getShape(), and this is a shape node, we will
# enter a recursive loop if compName isn't actually a comp:
# since shape doesn't have 'getShape', it will call __getattr__
        # for 'getShape', which in turn calls comp to check if it's a comp,
# which will call __getattr__, etc
# ..soo... check if we have a 'getShape'!
# ...also, don't use 'hasattr', as this will also call __getattr__!
try:
object.__getattribute__(self, 'getShape')
except AttributeError:
raise general.MayaComponentError( '%s.%s' % (self, compName) )
else:
shape = self.getShape()
if shape:
return shape.comp(compName)
def listComp(self, names=False):
"""Will return a list of all component objects for this object
Is to .comp() what .listAttr() is to .attr(); will NOT check the shape
node.
Parameters
----------
names : bool
            By default, will return a list of actual usable pymel Component
objects; if you just want a list of string names which would
be compatible with .comp(), set names to True
"""
keys = sorted(self._componentAttributes.keys())
if names:
return keys
compTypes = set()
comps = []
# use the sorted keys, so the order matches that returned by names,
# minus duplicate entries for aliases
for name in keys:
compType = self._componentAttributes[name]
if compType not in compTypes:
compTypes.add(compType)
comps.append(self.comp(name))
return comps
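    # Illustrative usage (not executed here), e.g. for a mesh shape:
    #     meshShape.comp('vtx')              # same component object as meshShape.vtx
    #     meshShape.listComp(names=True)     # -> ['e', 'edges', 'f', ...] (the keys of _componentAttributes)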
def _updateName(self, long=False) :
#if _api.isValidMObjectHandle(self._apiobject) :
#obj = self._apiobject.object()
#dagFn = _api.MFnDagNode(obj)
#dagPath = _api.MDagPath()
#dagFn.getPath(dagPath)
dag = self.__apimdagpath__()
if dag:
name = dag.partialPathName()
if not name:
raise general.MayaNodeError
self._name = name
if long :
return dag.fullPathName()
return self._name
def name(self, update=True, long=False) :
if update or long or self._name is None:
try:
return self._updateName(long)
except general.MayaObjectError:
_logger.warn( "object %s no longer exists" % self._name )
return self._name
def longName(self,stripNamespace=False,levels=0):
"""
The full dag path to the object, including leading pipe ( | )
:rtype: `unicode`
"""
if stripNamespace:
name = self.name(long=True)
nodes = []
for x in name.split('|'):
y = x.split('.')
z = y[0].split(':')
if levels:
y[0] = ':'.join( z[min(len(z)-1,levels):] )
else:
y[0] = z[-1]
nodes.append( '.'.join( y ) )
stripped_name = '|'.join( nodes)
return stripped_name
return self.name(long=True)
fullPath = longName
def shortName( self ):
"""
The shortest unique name.
:rtype: `unicode`
"""
return self.name(long=False)
def nodeName( self, stripNamespace=False ):
"""
Just the name of the node, without any dag path
:rtype: `unicode`
"""
name = self.name().rsplit('|', 1)[-1]
if stripNamespace:
name = name.rsplit(':', 1)[-1]
return name
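    # Illustrative usage (not executed here), assuming a hypothetical node '|grp|ns:pCube1':
    #     node.longName()                       # -> u'|grp|ns:pCube1'
    #     node.longName(stripNamespace=True)    # -> u'|grp|pCube1'
    #     node.nodeName()                       # -> u'ns:pCube1'
    #     node.nodeName(stripNamespace=True)    # -> u'pCube1'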
def __apiobject__(self) :
"get the MDagPath for this object if it is valid"
return self.__apimdagpath__()
def __apimdagpath__(self) :
"get the MDagPath for this object if it is valid"
try:
dag = self.__apiobjects__['MDagPath']
# test for validity: if the object is not valid an error will be raised
self.__apimobject__()
return dag
except KeyError:
# class was instantiated from an MObject, but we can still retrieve the first MDagPath
#assert argObj.hasFn( _api.MFn.kDagNode )
dag = _api.MDagPath()
            # we can't use self.__apimfn__() because the mfn is instantiated from an MDagPath
# which we are in the process of finding out
mfn = _api.MFnDagNode( self.__apimobject__() )
mfn.getPath(dag)
self.__apiobjects__['MDagPath'] = dag
return dag
# if dag.isValid():
# #argObj = dag
# if dag.fullPathName():
# argObj = dag
# else:
# print 'produced valid MDagPath with no name: %s(%s)' % ( argObj.apiTypeStr(), _api.MFnDependencyNode(argObj).name() )
def __apihandle__(self) :
try:
handle = self.__apiobjects__['MObjectHandle']
except KeyError:
try:
handle = _api.MObjectHandle( self.__apiobjects__['MDagPath'].node() )
except RuntimeError:
raise general.MayaNodeError( self._name )
self.__apiobjects__['MObjectHandle'] = handle
return handle
# def __apimfn__(self):
# if self._apimfn:
# return self._apimfn
# elif self.__apicls__:
# obj = self._apiobject
# if _api.isValidMDagPath(obj):
# try:
# self._apimfn = self.__apicls__(obj)
# return self._apimfn
# except KeyError:
# pass
# def __init__(self, *args, **kwargs):
# if self._apiobject:
# if isinstance(self._apiobject, _api.MObjectHandle):
# dagPath = _api.MDagPath()
# _api.MDagPath.getAPathTo( self._apiobject.object(), dagPath )
# self._apiobject = dagPath
#
# assert _api.isValidMDagPath( self._apiobject )
# def __init__(self, *args, **kwargs) :
# if args :
# arg = args[0]
# if len(args) > 1 :
# comp = args[1]
# if isinstance(arg, DagNode) :
# self._name = unicode(arg.name())
# self._apiobject = _api.MObjectHandle(arg.object())
# elif _api.isValidMObject(arg) or _api.isValidMObjectHandle(arg) :
# objHandle = _api.MObjectHandle(arg)
# obj = objHandle.object()
# if _api.isValidMDagNode(obj) :
# self._apiobject = objHandle
# self._updateName()
# else :
# raise TypeError, "%r might be a dependencyNode, but not a dagNode" % arg
# elif isinstance(arg, basestring) :
# obj = _api.toMObject (arg)
# if obj :
# # creation for existing object
# if _api.isValidMDagNode (obj):
# self._apiobject = _api.MObjectHandle(obj)
# self._updateName()
# else :
# raise TypeError, "%r might be a dependencyNode, but not a dagNode" % arg
# else :
# # creation for inexistent object
# self._name = arg
# else :
# raise TypeError, "don't know how to make a DagNode out of a %s : %r" % (type(arg), arg)
#--------------------------------
#xxx{ Path Info and Modification
#--------------------------------
def root(self):
"""rootOf
:rtype: `unicode`
"""
return DagNode( '|' + self.longName()[1:].split('|')[0] )
# def hasParent(self, parent ):
# try:
# return self.__apimfn__().hasParent( parent.__apiobject__() )
# except AttributeError:
# obj = _api.toMObject(parent)
# if obj:
# return self.__apimfn__().hasParent( obj )
#
# def hasChild(self, child ):
# try:
# return self.__apimfn__().hasChild( child.__apiobject__() )
# except AttributeError:
# obj = _api.toMObject(child)
# if obj:
# return self.__apimfn__().hasChild( obj )
#
# def isParentOf( self, parent ):
# try:
# return self.__apimfn__().isParentOf( parent.__apiobject__() )
# except AttributeError:
# obj = _api.toMObject(parent)
# if obj:
# return self.__apimfn__().isParentOf( obj )
#
# def isChildOf( self, child ):
# try:
# return self.__apimfn__().isChildOf( child.__apiobject__() )
# except AttributeError:
# obj = _api.toMObject(child)
# if obj:
# return self.__apimfn__().isChildOf( obj )
def isInstanceOf(self, other):
"""
:rtype: `bool`
"""
if isinstance( other, general.PyNode ):
return self.__apimobject__() == other.__apimobject__()
else:
try:
return self.__apimobject__() == general.PyNode(other).__apimobject__()
except:
return False
def instanceNumber(self):
"""
returns the instance number that this path represents in the DAG. The instance number can be used to determine which
element of the world space array attributes of a DAG node to connect to get information regarding this instance.
:rtype: `int`
"""
return self.__apimdagpath__().instanceNumber()
def getInstances(self, includeSelf=True):
"""
:rtype: `DagNode` list
>>> from pymel.core import *
>>> f=newFile(f=1) #start clean
>>>
>>> s = polyPlane()[0]
>>> instance(s)
[nt.Transform(u'pPlane2')]
>>> instance(s)
[nt.Transform(u'pPlane3')]
>>> s.getShape().getInstances()
[nt.Mesh(u'pPlane1|pPlaneShape1'), nt.Mesh(u'pPlane2|pPlaneShape1'), nt.Mesh(u'pPlane3|pPlaneShape1')]
>>> s.getShape().getInstances(includeSelf=False)
[nt.Mesh(u'pPlane2|pPlaneShape1'), nt.Mesh(u'pPlane3|pPlaneShape1')]
"""
d = _api.MDagPathArray()
self.__apimfn__().getAllPaths(d)
thisDagPath = self.__apimdagpath__()
result = [ general.PyNode( _api.MDagPath(d[i])) for i in range(d.length()) if includeSelf or not d[i] == thisDagPath ]
return result
def getOtherInstances(self):
"""
same as `DagNode.getInstances` with includeSelf=False.
:rtype: `DagNode` list
"""
return self.getInstances(includeSelf=False)
def firstParent(self):
"""firstParentOf
:rtype: `DagNode`
"""
try:
return DagNode( '|'.join( self.longName().split('|')[:-1] ) )
except TypeError:
return DagNode( '|'.join( self.split('|')[:-1] ) )
# def numChildren(self):
# """
# see also `childCount`
#
# :rtype: `int`
# """
# return self.__apimdagpath__().childCount()
# def getParent(self, **kwargs):
# # TODO : print warning regarding removal of kwargs, test speed difference
# parent = _api.MDagPath( self.__apiobject__() )
# try:
# parent.pop()
# return general.PyNode(parent)
# except RuntimeError:
# pass
#
# def getChildren(self, **kwargs):
# # TODO : print warning regarding removal of kwargs
# children = []
# thisDag = self.__apiobject__()
# for i in range( thisDag.childCount() ):
# child = _api.MDagPath( thisDag )
# child.push( thisDag.child(i) )
# children.append( general.PyNode(child) )
# return children
def firstParent2(self, **kwargs):
"""unlike the firstParent command which determines the parent via string formatting, this
command uses the listRelatives command
"""
kwargs['parent'] = True
kwargs.pop('p',None)
#if longNames:
kwargs['fullPath'] = True
kwargs.pop('f',None)
try:
res = cmds.listRelatives( self, **kwargs)[0]
except TypeError:
return None
res = general.PyNode( res )
return res
@staticmethod
def _getDagParent(dag):
if dag.length() <= 1:
return None
# Need a copy as we'll be modifying it...
dag = _api.MDagPath(dag)
dag.pop()
return dag
def getParent(self, generations=1):
"""
Modifications:
- added optional generations flag, which gives the number of levels up that you wish to go for the parent;
ie:
>>> from pymel.core import *
>>> select(cl=1)
>>> bottom = group(n='bottom')
>>> group(n='almostThere')
nt.Transform(u'almostThere')
>>> group(n='nextLevel')
nt.Transform(u'nextLevel')
>>> group(n='topLevel')
nt.Transform(u'topLevel')
>>> bottom.longName()
u'|topLevel|nextLevel|almostThere|bottom'
>>> bottom.getParent(2)
nt.Transform(u'nextLevel')
Negative values will traverse from the top:
>>> bottom.getParent(generations=-3)
nt.Transform(u'almostThere')
A value of 0 will return the same node.
The default value is 1.
If generations is None, it will be interpreted as 'return all
parents', and a list will be returned.
Since the original command returned None if there is no parent, to sync with this behavior, None will
be returned if generations is out of bounds (no IndexError will be thrown).
:rtype: `DagNode`
"""
# Get the parent through the api - listRelatives doesn't handle instances correctly,
# and string processing seems unreliable...
res = general._getParent(self._getDagParent, self.__apimdagpath__(), generations)
if generations is None:
if res is None:
return []
return [general.PyNode(x) for x in res]
elif res is not None:
return general.PyNode( res )
def getAllParents(self):
"""
Return a list of all parents above this.
Starts from the parent immediately above, going up.
:rtype: `DagNode` list
"""
return self.getParent(generations=None)
def getChildren(self, **kwargs ):
"""
see also `childAtIndex`
for flags, see pymel.core.general.listRelatives
:rtype: `DagNode` list
"""
kwargs['children'] = True
kwargs.pop('c',None)
return general.listRelatives( self, **kwargs)
def getSiblings(self, **kwargs ):
"""
for flags, see pymel.core.general.listRelatives
:rtype: `DagNode` list
"""
#pass
try:
return [ x for x in self.getParent().getChildren(**kwargs) if x != self]
except:
return []
def listRelatives(self, **kwargs ):
"""
for flags, see pymel.core.general.listRelatives
:rtype: `PyNode` list
"""
return general.listRelatives( self, **kwargs)
def setParent( self, *args, **kwargs ):
"""
parent
Modifications:
- if parent is 'None', world=True is automatically set
- if the given parent is the current parent, don't error
"""
result = general.parent(self, *args, **kwargs)
if result:
result = result[0]
return result
def addChild( self, child, **kwargs ):
"""parent (reversed)
:rtype: `DagNode`
"""
cmds.parent( child, self, **kwargs )
if not isinstance( child, general.PyNode ):
child = general.PyNode(child)
return child
def __or__(self, child, **kwargs):
"""
operator for `addChild`. Use to easily daisy-chain together parenting operations.
The operation order visually mimics the resulting dag path:
>>> from pymel.core import *
>>> s = polySphere(name='sphere')[0]
>>> c = polyCube(name='cube')[0]
>>> t = polyTorus(name='torus')[0]
>>> s | c | t
nt.Transform(u'torus')
>>> print t.fullPath()
|sphere|cube|torus
:rtype: `DagNode`
"""
return self.addChild(child,**kwargs)
#}
#instance = instance
#--------------------------
# Shading
#--------------------------
def isDisplaced(self):
"""Returns whether any of this object's shading groups have a displacement shader input
:rtype: `bool`
"""
for sg in self.shadingGroups():
if len( sg.attr('displacementShader').inputs() ):
return True
return False
def hide(self):
self.visibility.set(0)
def show(self):
self.visibility.set(1)
def isVisible(self, checkOverride=True):
if not self.attr('visibility').get():
return False
if (checkOverride and self.attr('overrideEnabled').get()
and not self.attr('overrideVisibility').get()):
return False
parent = self.getParent()
if not parent:
return True
else:
return parent.isVisible(checkOverride=checkOverride)
def setObjectColor( self, color=None ):
"""This command sets the dormant wireframe color of the specified objects to an integer
representing one of the user defined colors, or, if set to None, to the default class color"""
kwargs = {}
if color:
kwargs['userDefined'] = color
cmds.color(self, **kwargs)
def makeLive( self, state=True ):
if not state:
cmds.makeLive(none=True)
else:
cmds.makeLive(self)
class Shape(DagNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
def getTransform(self): pass
def setParent(self, *args, **kwargs):
if 'shape' not in kwargs and 's' not in kwargs:
kwargs['s'] = True
super(Shape, self).setParent(*args, **kwargs)
#class Joint(Transform):
# pass
class Camera(Shape):
__metaclass__ = _factories.MetaMayaNodeWrapper
def applyBookmark(self, bookmark):
kwargs = {}
kwargs['camera'] = self
kwargs['edit'] = True
kwargs['setCamera'] = True
cmds.cameraView( bookmark, **kwargs )
def addBookmark(self, bookmark=None):
kwargs = {}
kwargs['camera'] = self
kwargs['addBookmark'] = True
if bookmark:
kwargs['name'] = bookmark
cmds.cameraView( **kwargs )
def removeBookmark(self, bookmark):
kwargs = {}
kwargs['camera'] = self
kwargs['removeBookmark'] = True
kwargs['name'] = bookmark
cmds.cameraView( **kwargs )
def updateBookmark(self, bookmark):
kwargs = {}
kwargs['camera'] = self
kwargs['edit'] = True
kwargs['setView'] = True
cmds.cameraView( bookmark, **kwargs )
def listBookmarks(self):
return self.bookmarks.inputs()
@_factories.addMelDocs('dolly')
def dolly(self, distance, relative=True):
kwargs = {}
kwargs['distance'] = distance
if relative:
kwargs['relative'] = True
else:
kwargs['absolute'] = True
cmds.dolly(self, **kwargs)
@_factories.addMelDocs('roll')
def roll(self, degree, relative=True):
kwargs = {}
kwargs['degree'] = degree
if relative:
kwargs['relative'] = True
else:
kwargs['absolute'] = True
cmds.roll(self, **kwargs)
#TODO: the functionFactory is causing these methods to have their docs doubled-up, in both pymel.track, and pymel.Camera.track
#dolly = _factories.functionFactory( cmds.dolly )
#roll = _factories.functionFactory( cmds.roll )
orbit = _factories.functionFactory( cmds.orbit )
track = _factories.functionFactory( cmds.track )
tumble = _factories.functionFactory( cmds.tumble )
class Transform(DagNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'rotatePivot' : (general.Pivot, 'rotatePivot'),
'scalePivot' : (general.Pivot, 'scalePivot')}
# def __getattr__(self, attr):
# try :
# return super(general.PyNode, self).__getattr__(attr)
# except AttributeError, msg:
# try:
# return self.getShape().attr(attr)
# except AttributeError:
# pass
#
# # it doesn't exist on the class
# try:
# return self.attr(attr)
# except MayaAttributeError, msg:
# # try the shape
# try: return self.getShape().attr(attr)
# except AttributeError: pass
# # since we're being called via __getattr__ we don't know whether the user was trying
# # to get a class method or a maya attribute, so we raise a more generic AttributeError
# raise AttributeError, msg
def __getattr__(self, attr):
"""
Checks in the following order:
1. Functions on this node class
2. Attributes on this node class
3. Functions on this node class's shape
4. Attributes on this node class's shape
"""
try :
#print "Transform.__getattr__(%r)" % attr
# Functions through normal inheritance
res = DependNode.__getattr__(self,attr)
except AttributeError, e:
# Functions via shape inheritance , and then, implicitly, Attributes
for shape in self.getShapes():
try:
return getattr(shape,attr)
except AttributeError: pass
raise e
return res
def __setattr__(self, attr, val):
"""
Checks in the following order:
1. Functions on this node class
2. Attributes on this node class
3. Functions on this node class's shape
4. Attributes on this node class's shape
"""
try :
#print "Transform.__setattr__", attr, val
# Functions through normal inheritance
return DependNode.__setattr__(self,attr,val)
except AttributeError, e:
# Functions via shape inheritance , and then, implicitly, Attributes
#print "Trying shape"
shape = self.getShape()
if shape:
try:
return setattr(shape,attr, val)
except AttributeError: pass
raise e
def attr(self, attr, checkShape=True):
"""
        when checkShape is enabled, if the attribute does not exist on the transform but does exist on the
        shape, then the shape's attribute will be returned.
:rtype: `Attribute`
"""
#print "ATTR: Transform"
try :
res = self._attr(attr, checkShape)
except general.MayaAttributeError, e:
if checkShape:
try:
res = self.getShape().attr(attr)
except AttributeError:
raise e
raise e
return res
# def __getattr__(self, attr):
# if attr.startswith('__') and attr.endswith('__'):
# return super(general.PyNode, self).__getattr__(attr)
#
# at = Attribute( '%s.%s' % (self, attr) )
#
# # if the attribute does not exist on this node try the shape node
# if not at.exists():
# try:
# childAttr = getattr( self.getShape(), attr)
# try:
# if childAttr.exists():
# return childAttr
# except AttributeError:
# return childAttr
# except (AttributeError,TypeError):
# pass
#
# return at
#
# def __setattr__(self, attr,val):
# if attr.startswith('_'):
# attr = attr[1:]
#
# at = Attribute( '%s.%s' % (self, attr) )
#
# # if the attribute does not exist on this node try the shape node
# if not at.exists():
# try:
# childAttr = getattr( self.getShape(), attr )
# try:
# if childAttr.exists():
# return childAttr.set(val)
# except AttributeError:
# return childAttr.set(val)
# except (AttributeError,TypeError):
# pass
#
# return at.set(val)
"""
def move( self, *args, **kwargs ):
return move( self, *args, **kwargs )
def scale( self, *args, **kwargs ):
return scale( self, *args, **kwargs )
def rotate( self, *args, **kwargs ):
return rotate( self, *args, **kwargs )
def align( self, *args, **kwargs):
args = (self,) + args
cmds.align(self, *args, **kwargs)
"""
# NOTE : removed this via proxyClass
# # workaround for conflict with translate method on basestring
# def _getTranslate(self):
# return self.__getattr__("translate")
# def _setTranslate(self, val):
# return self.__setattr__("translate", val)
# translate = property( _getTranslate , _setTranslate )
def getShape( self, **kwargs ):
"""
:rtype: `DagNode`
"""
kwargs['shapes'] = True
try:
return self.getChildren( **kwargs )[0]
except IndexError:
pass
def getShapes( self, **kwargs ):
"""
:rtype: `DagNode`
"""
kwargs['shapes'] = True
return self.getChildren( **kwargs )
def ungroup( self, **kwargs ):
return cmds.ungroup( self, **kwargs )
# @_factories.editflag('xform','scale')
# def setScale( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
# @_factories.editflag('xform','rotation')
# def setRotationOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','translation')
# def setTranslationOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','scalePivot')
# def setScalePivotOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','rotatePivot')
# def setRotatePivotOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
# @_factories.editflag('xform','pivots')
# def setPivots( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
# @_factories.editflag('xform','rotateAxis')
# def setRotateAxisOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','shear')
# def setShearingOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
    @_factories.addMelDocs('xform','matrix')
def setMatrix( self, val, **kwargs ):
"""xform -scale"""
kwargs['matrix'] = val
cmds.xform( self, **kwargs )
# @_factories.queryflag('xform','scale')
# def getScaleOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
def _getSpaceArg(self, space, kwargs):
"for internal use only"
if kwargs.pop( 'worldSpace', kwargs.pop('ws', False) ):
space = 'world'
elif kwargs.pop( 'objectSpace', kwargs.pop('os', False) ):
space = 'object'
return space
def _isRelativeArg(self, kwargs ):
isRelative = kwargs.pop( 'relative', kwargs.pop('r', None) )
if isRelative is None:
isRelative = not kwargs.pop( 'absolute', kwargs.pop('a', True) )
return isRelative
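    # These two helpers let the wrapped transform methods below accept mel-style flags;
    # for example, the following calls are equivalent (illustrative, not executed here):
    #     node.setTranslation([1, 2, 3], space='world')
    #     node.setTranslation([1, 2, 3], ws=True)
    # and passing relative=True (or r=True) routes setTranslation through translateBy.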
# @_factories.queryflag('xform','translation')
# def getTranslationOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs( _api.MFnTransform, 'setTranslation' )
def setTranslation(self, vector, space='object', **kwargs):
if self._isRelativeArg(kwargs):
return self.translateBy(vector, space, **kwargs)
space = self._getSpaceArg(space, kwargs )
return self._setTranslation(vector, space=space)
@_factories.addApiDocs( _api.MFnTransform, 'getTranslation' )
def getTranslation(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getTranslation(space=space)
@_factories.addApiDocs( _api.MFnTransform, 'translateBy' )
def translateBy(self, vector, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
curr = self._getTranslation(space)
self._translateBy(vector, space)
new = self._getTranslation(space)
undoItem = _factories.ApiUndoItem(Transform.setTranslation, (self, new, space), (self, curr, space) )
_factories.apiUndo.append( undoItem )
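    # Note: the *By methods call the API directly, which bypasses Maya's undo queue,
    # so an ApiUndoItem recording the previous and new values is pushed onto pymel's
    # apiUndo stack to keep the operation undoable.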
@_factories.addApiDocs( _api.MFnTransform, 'setScale' )
def setScale(self, scale, **kwargs):
if self._isRelativeArg(kwargs):
return self.scaleBy(scale, **kwargs)
return self._setScale(scale)
@_factories.addApiDocs( _api.MFnTransform, 'scaleBy' )
def scaleBy(self, scale, **kwargs):
curr = self.getScale()
self._scaleBy(scale)
new = self.getScale()
undoItem = _factories.ApiUndoItem(Transform.setScale, (self, new), (self, curr) )
_factories.apiUndo.append( undoItem )
@_factories.addApiDocs( _api.MFnTransform, 'setShear' )
def setShear(self, shear, **kwargs):
if self._isRelativeArg(kwargs):
return self.shearBy(shear, **kwargs)
return self._setShear(shear)
@_factories.addApiDocs( _api.MFnTransform, 'shearBy' )
def shearBy(self, shear, **kwargs):
curr = self.getShear()
self._shearBy(shear)
new = self.getShear()
undoItem = _factories.ApiUndoItem(Transform.setShear, (self, new), (self, curr) )
_factories.apiUndo.append( undoItem )
# @_factories.queryflag('xform','rotatePivot')
# def getRotatePivotOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs( _api.MFnTransform, 'setRotatePivot' )
def setRotatePivot(self, point, space='object', balance=True, **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._setRotatePivot(point, space=space, balance=balance)
@_factories.addApiDocs( _api.MFnTransform, 'rotatePivot' )
def getRotatePivot(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getRotatePivot(space=space)
@_factories.addApiDocs( _api.MFnTransform, 'setRotatePivotTranslation' )
def setRotatePivotTranslation(self, vector, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._setRotatePivotTranslation(vector, space=space)
@_factories.addApiDocs( _api.MFnTransform, 'rotatePivotTranslation' )
def getRotatePivotTranslation(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getRotatePivotTranslation(space=space)
# @_factories.queryflag('xform','rotation')
# def getRotationOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs( _api.MFnTransform, 'setRotation' )
def setRotation(self, rotation, space='object', **kwargs):
'''
Modifications:
- rotation may be given as an EulerRotation, Quaternion, or iterable of 3
or 4 components (to specify an euler/quaternion, respectively)
'''
# quaternions are the only method that support a space parameter
if self._isRelativeArg(kwargs):
return self.rotateBy(rotation, space, **kwargs)
spaceIndex = datatypes.Spaces.getIndex(self._getSpaceArg(space, kwargs))
if not isinstance(rotation, (_api.MQuaternion, _api.MEulerRotation)):
rotation = list(rotation)
if len(rotation) == 3:
# using datatypes.Angle(x) means current angle-unit should be
# respected
rotation = [ datatypes.Angle( x ).asRadians() for x in rotation ]
rotation = _api.MEulerRotation( *rotation )
elif len(rotation) == 4:
rotation = _api.MQuaternion(*rotation)
else:
raise ValueError("rotation given to setRotation must have either 3 or 4 elements (for euler or quaternion, respectively)")
if isinstance(rotation, _api.MEulerRotation):
# MFnTransform.setRotation doesn't have a (non-deprecated) override
# which takes euler angles AND a transform space... this sort of
# makes sense, since the "unique" information that euler angles can
            # potentially carry - ie, rotation > 360 degrees - only really makes
# sense within the "transform" space. So, only use EulerRotation if
# we're using transform space...
if datatypes.equivalentSpace(spaceIndex, _api.MSpace.kTransform,
rotationOnly=True):
self.__apimfn__().setRotation(rotation)
return
else:
rotation = rotation.asQuaternion()
self.__apimfn__().setRotation(rotation, spaceIndex )
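    # Illustrative usage (not executed here):
    #     node.setRotation([90, 0, 0])            # 3 components -> euler angles, in the current angle unit
    #     node.setRotation([0, 0, 0, 1])          # 4 components -> quaternion
    #     node.setRotation(rot, space='world')    # rot being e.g. a datatypes.Quaternion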
# @_factories.addApiDocs( _api.MFnTransform, 'getRotation' )
# def getRotation(self, space='object', **kwargs):
# # quaternions are the only method that support a space parameter
# space = self._getSpaceArg(space, kwargs )
# quat = _api.MQuaternion()
# _api.MFnTransform(self.__apimfn__()).getRotation(quat, datatypes.Spaces.getIndex(space) )
# return datatypes.EulerRotation( quat.asEulerRotation() )
@_factories.addApiDocs( _api.MFnTransform, 'getRotation', overloadIndex=1 )
def getRotation(self, space='object', quaternion=False, **kwargs):
'''
Modifications:
            - added 'quaternion' keyword arg, to specify whether the result
              should be returned as a Quaternion object, as opposed to the default
EulerRotation object
- added 'space' keyword arg, which defaults to 'object'
'''
# quaternions are the only method that support a space parameter
space = self._getSpaceArg(space, kwargs )
if space.lower() in ('object', 'pretransform', 'transform') and not quaternion:
# In this case, we can just go straight to the EulerRotation,
# without having to go through Quaternion - this means we will
# get information like angles > 360 degrees
euler = _api.MEulerRotation()
self.__apimfn__().getRotation(euler)
rot = datatypes.EulerRotation(euler)
else:
rot = self._getRotation(space=space)
if not quaternion:
rot = rot.asEulerRotation()
if isinstance(rot, datatypes.EulerRotation):
rot.setDisplayUnit( datatypes.Angle.getUIUnit() )
return rot
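    # Illustrative usage (not executed here):
    #     node.getRotation()                   # -> datatypes.EulerRotation (object space)
    #     node.getRotation(quaternion=True)    # -> datatypes.Quaternion
    #     node.getRotation(space='world')      # world-space rotation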
@_factories.addApiDocs( _api.MFnTransform, 'rotateBy' )
def rotateBy(self, rotation, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
curr = self.getRotation(space)
self._rotateBy(rotation, space)
new = self.getRotation(space)
undoItem = _factories.ApiUndoItem(Transform.setRotation, (self, new, space), (self, curr, space) )
_factories.apiUndo.append( undoItem )
# @_factories.queryflag('xform','scalePivot')
# def getScalePivotOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs( _api.MFnTransform, 'setScalePivot' )
def setScalePivot(self, point, space='object', balance=True, **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._setScalePivot(point, space=space, balance=balance)
@_factories.addApiDocs( _api.MFnTransform, 'scalePivot' )
def getScalePivot(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getScalePivot(space=space)
@_factories.addApiDocs( _api.MFnTransform, 'setScalePivotTranslation' )
def setScalePivotTranslation(self, vector, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._setScalePivotTranslation(vector, space=space)
@_factories.addApiDocs( _api.MFnTransform, 'scalePivotTranslation' )
def getScalePivotTranslation(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getScalePivotTranslation(space=space)
@_factories.queryflag('xform','pivots')
def getPivots( self, **kwargs ):
res = cmds.xform( self, **kwargs )
return ( datatypes.Vector( res[:3] ), datatypes.Vector( res[3:] ) )
@_factories.queryflag('xform','rotateAxis')
def getRotateAxis( self, **kwargs ):
return datatypes.Vector( cmds.xform( self, **kwargs ) )
# @_factories.queryflag('xform','shear')
# def getShearOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.queryflag('xform','matrix')
def getMatrix( self, **kwargs ):
return datatypes.Matrix( cmds.xform( self, **kwargs ) )
#TODO: create API equivalent of `xform -boundingBoxInvisible` so we can replace this with _api.
def getBoundingBox(self, invisible=False, space='object'):
"""xform -boundingBox and xform -boundingBoxInvisible
:rtype: `BoundingBox`
"""
kwargs = {'query' : True }
if invisible:
kwargs['boundingBoxInvisible'] = True
else:
kwargs['boundingBox'] = True
if space=='object':
kwargs['objectSpace'] = True
elif space=='world':
kwargs['worldSpace'] = True
else:
raise ValueError('unknown space %r' % space)
res = cmds.xform( self, **kwargs )
#return ( datatypes.Vector(res[:3]), datatypes.Vector(res[3:]) )
return datatypes.BoundingBox( res[:3], res[3:] )
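    # Illustrative usage (not executed here):
    #     node.getBoundingBox()                  # object-space BoundingBox
    #     node.getBoundingBox(space='world')     # world-space BoundingBox
    #     node.getBoundingBox(invisible=True)    # xform -boundingBoxInvisible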
def getBoundingBoxMin(self, invisible=False, space='object'):
"""
:rtype: `Vector`
"""
return self.getBoundingBox(invisible, space)[0]
#return self.getBoundingBox(invisible).min()
def getBoundingBoxMax(self, invisible=False, space='object'):
"""
:rtype: `Vector`
"""
return self.getBoundingBox(invisible, space)[1]
#return self.getBoundingBox(invisible).max()
# def centerPivots(self, **kwargs):
# """xform -centerPivots"""
# kwargs['centerPivots'] = True
# cmds.xform( self, **kwargs )
#
# def zeroTransformPivots(self, **kwargs):
# """xform -zeroTransformPivots"""
# kwargs['zeroTransformPivots'] = True
# cmds.xform( self, **kwargs )
class Joint(Transform):
__metaclass__ = _factories.MetaMayaNodeWrapper
connect = _factories.functionFactory( cmds.connectJoint, rename='connect')
disconnect = _factories.functionFactory( cmds.disconnectJoint, rename='disconnect')
insert = _factories.functionFactory( cmds.insertJoint, rename='insert')
if versions.isUnlimited():
class FluidEmitter(Transform):
__metaclass__ = _factories.MetaMayaNodeWrapper
fluidVoxelInfo = _factories.functionFactory( cmds.fluidVoxelInfo, rename='fluidVoxelInfo')
loadFluid = _factories.functionFactory( cmds.loadFluid, rename='loadFluid')
resampleFluid = _factories.functionFactory( cmds.resampleFluid, rename='resampleFluid')
saveFluid = _factories.functionFactory( cmds.saveFluid, rename='saveFluid')
setFluidAttr = _factories.functionFactory( cmds.setFluidAttr, rename='setFluidAttr')
getFluidAttr = _factories.functionFactory( cmds.getFluidAttr, rename='getFluidAttr')
class RenderLayer(DependNode):
def listMembers(self, fullNames=True):
if fullNames:
return map( general.PyNode, _util.listForNone( cmds.editRenderLayerMembers( self, q=1, fullNames=True) ) )
else:
return _util.listForNone( cmds.editRenderLayerMembers( self, q=1, fullNames=False) )
def addMembers(self, members, noRecurse=True):
cmds.editRenderLayerMembers( self, members, noRecurse=noRecurse )
def removeMembers(self, members ):
cmds.editRenderLayerMembers( self, members, remove=True )
def listAdjustments(self):
return map( general.PyNode, _util.listForNone( cmds.editRenderLayerAdjustment( self, layer=1, q=1) ) )
def addAdjustments(self, members):
return cmds.editRenderLayerAdjustment( members, layer=self )
def removeAdjustments(self, members ):
return cmds.editRenderLayerAdjustment( members, layer=self, remove=True )
def setCurrent(self):
cmds.editRenderLayerGlobals( currentRenderLayer=self)
class DisplayLayer(DependNode):
def listMembers(self, fullNames=True):
if fullNames:
return map( general.PyNode, _util.listForNone( cmds.editDisplayLayerMembers( self, q=1, fullNames=True) ) )
else:
return _util.listForNone( cmds.editDisplayLayerMembers( self, q=1, fullNames=False) )
def addMembers(self, members, noRecurse=True):
cmds.editDisplayLayerMembers( self, members, noRecurse=noRecurse )
def removeMembers(self, members ):
cmds.editDisplayLayerMembers( self, members, remove=True )
def setCurrent(self):
cmds.editDisplayLayerMembers( currentDisplayLayer=self)
class Constraint(Transform):
def setWeight( self, weight, *targetObjects ):
inFunc = getattr( cmds, self.type() )
if not targetObjects:
targetObjects = self.getTargetList()
constraintObj = self.constraintParentInverseMatrix.inputs()[0]
args = list(targetObjects) + [constraintObj]
return inFunc( *args, **{'edit':True, 'weight':weight} )
def getWeight( self, *targetObjects ):
inFunc = getattr( cmds, self.type() )
if not targetObjects:
targetObjects = self.getTargetList()
constraintObj = self.constraintParentInverseMatrix.inputs()[0]
args = list(targetObjects) + [constraintObj]
return inFunc( *args, **{'query':True, 'weight':True} )
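    # Illustrative usage (not executed here), e.g. for a parentConstraint node 'con':
    #     con.getWeight()              # query the weight of every target
    #     con.setWeight(0.5)           # set the weight of every target
    #     con.setWeight(0.5, target)   # set the weight of a specific target transform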
class GeometryShape(Shape):
def __getattr__(self, attr):
#print "Mesh.__getattr__", attr
try:
return self.comp(attr)
except general.MayaComponentError:
#print "getting super", attr
return super(GeometryShape,self).__getattr__(attr)
class DeformableShape(GeometryShape):
@classmethod
def _numCVsFunc_generator(cls, formFunc, spansPlusDegreeFunc, spansFunc,
name=None, doc=None):
"""
Intended to be used by NurbsCurve / NurbsSurface to generate
functions which give the 'true' number of editable CVs,
as opposed to just numSpans + degree.
(The two values will differ if we have a periodic curve).
Note that this will usually need to be called outside/after the
class definition, as formFunc/spansFunc/etc will not be defined
until then, as they are added by the metaclass.
"""
def _numCvs_generatedFunc(self, editableOnly=True):
if editableOnly and formFunc(self) == self.Form.periodic:
return spansFunc(self)
else:
return spansPlusDegreeFunc(self)
if name:
_numCvs_generatedFunc.__name__ = name
if doc:
_numCvs_generatedFunc.__doc__ = doc
return _numCvs_generatedFunc
@classmethod
def _numEPsFunc_generator(cls, formFunc, spansFunc,
name=None, doc=None):
"""
Intended to be used by NurbsCurve / NurbsSurface to generate
functions which give the 'true' number of editable EPs,
as opposed to just numSpans.
(The two values will differ if we have a periodic curve).
Note that this will usually need to be called outside/after the
class definition, as formFunc/spansFunc will not be defined
until then, as they are added by the metaclass.
"""
def _numEPs_generatedFunc(self, editableOnly=True):
if editableOnly and formFunc(self) == self.Form.periodic:
return spansFunc(self)
else:
return spansFunc(self) + 1
if name:
_numEPs_generatedFunc.__name__ = name
if doc:
_numEPs_generatedFunc.__doc__ = doc
return _numEPs_generatedFunc
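    # These generators are used below, after the NurbsCurve / NurbsSurface class
    # definitions, to build the numCVs / numEPs style methods whose editable counts
    # collapse to numSpans when the curve or surface form is periodic.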
class ControlPoint(DeformableShape): pass
class CurveShape(DeformableShape): pass
class NurbsCurve(CurveShape):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'u' : general.NurbsCurveParameter,
'cv' : general.NurbsCurveCV,
'controlVerts': general.NurbsCurveCV,
'ep' : general.NurbsCurveEP,
'editPoints' : general.NurbsCurveEP,
'knot' : general.NurbsCurveKnot,
'knots' : general.NurbsCurveKnot}
# apiToMelBridge maps MFnNurbsCurve.numCVs => NurbsCurve._numCVsApi
NurbsCurve.numCVs = \
NurbsCurve._numCVsFunc_generator(NurbsCurve.form,
NurbsCurve._numCVsApi,
NurbsCurve.numSpans,
name='numCVs',
doc =
"""
Returns the number of CVs.
:Parameters:
editableOnly : `bool`
If editableOnly evaluates to True (default), then this will return
the number of cvs that can be actually edited (and also the highest
index that may be used for cv's - ie, if
myCurve.numCVs(editableOnly=True) == 4
then allowable cv indices go from
                myCurve.cv[0] to myCurve.cv[3]
            If editableOnly is False, then this will return the underlying
number of cvs used to define the mathematical curve -
degree + numSpans.
These will only differ if the form is 'periodic', in which
case the editable number will be numSpans (as the last 'degree'
cv's are 'locked' to be the same as the first 'degree' cvs).
In all other cases, the number of cvs will be degree + numSpans.
:Examples:
>>> from pymel.core import *
>>> # a periodic curve
>>> myCurve = curve(name='periodicCurve1', d=3, periodic=True, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.cv
NurbsCurveCV(u'periodicCurveShape1.cv[0:7]')
>>> myCurve.numCVs()
8
>>> myCurve.numCVs(editableOnly=False)
11
>>>
>>> # an open curve
>>> myCurve = curve(name='openCurve1', d=3, periodic=False, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.cv
NurbsCurveCV(u'openCurveShape1.cv[0:10]')
>>> myCurve.numCVs()
11
>>> myCurve.numCVs(editableOnly=False)
11
:rtype: `int`
""")
NurbsCurve.numEPs = \
NurbsCurve._numEPsFunc_generator(NurbsCurve.form,
NurbsCurve.numSpans,
name='numEPs',
doc =
"""
Returns the number of EPs.
:Examples:
>>> from pymel.core import *
>>> # a periodic curve
>>> myCurve = curve(name='periodicCurve2', d=3, periodic=True, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.ep
NurbsCurveEP(u'periodicCurveShape2.ep[0:7]')
>>> myCurve.numEPs()
8
>>>
>>> # an open curve
>>> myCurve = curve(name='openCurve2', d=3, periodic=False, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.ep
NurbsCurveEP(u'openCurveShape2.ep[0:8]')
>>> myCurve.numEPs()
9
:rtype: `int`
""")
class SurfaceShape(ControlPoint): pass
class NurbsSurface(SurfaceShape):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'u' : (general.NurbsSurfaceRange, 'u'),
'uIsoparm' : (general.NurbsSurfaceRange, 'u'),
'v' : (general.NurbsSurfaceRange, 'v'),
'vIsoparm' : (general.NurbsSurfaceRange, 'v'),
'uv' : (general.NurbsSurfaceRange, 'uv'),
'cv' : general.NurbsSurfaceCV,
'controlVerts': general.NurbsSurfaceCV,
'ep' : general.NurbsSurfaceEP,
'editPoints' : general.NurbsSurfaceEP,
'knot' : general.NurbsSurfaceKnot,
'knots' : general.NurbsSurfaceKnot,
'sf' : general.NurbsSurfaceFace,
'faces' : general.NurbsSurfaceFace}
# apiToMelBridge maps MFnNurbsCurve._numCVsInU => NurbsCurve._numCVsInUApi
NurbsSurface.numCVsInU = \
NurbsSurface._numCVsFunc_generator(NurbsSurface.formInU,
NurbsSurface._numCVsInUApi,
NurbsSurface.numSpansInU,
name='numCVsInU',
doc =
"""
Returns the number of CVs in the U direction.
:Parameters:
editableOnly : `bool`
If editableOnly evaluates to True (default), then this will return
the number of cvs that can be actually edited (and also the highest
index that may be used for u - ie, if
mySurf.numCVsInU(editableOnly=True) == 4
then allowable u indices go from
mySurf.cv[0][*] to mySurf.cv[3][*]
            If editableOnly is False, then this will return the underlying
number of cvs used to define the mathematical curve in u -
degreeU + numSpansInU.
These will only differ if the form in u is 'periodic', in which
case the editable number will be numSpansInU (as the last 'degree'
cv's are 'locked' to be the same as the first 'degree' cvs).
In all other cases, the number of cvs will be degreeU + numSpansInU.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf1', du=3, dv=1, fu='periodic', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=[(4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.cv[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((7, 0), label=None)]
>>> mySurf.numCVsInU()
8
>>> mySurf.numCVsInU(editableOnly=False)
11
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf1', du=3, dv=1, fu='open', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=((4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)) )
>>> sorted(mySurf.cv[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((10, 0), label=None)]
>>> mySurf.numCVsInU()
11
>>> mySurf.numCVsInU(editableOnly=False)
11
:rtype: `int`
""")
# apiToMelBridge maps MFnNurbsCurve._numCVsInV => NurbsCurve._numCVsInVApi
NurbsSurface.numCVsInV = \
NurbsSurface._numCVsFunc_generator(NurbsSurface.formInV,
NurbsSurface._numCVsInVApi,
NurbsSurface.numSpansInV,
name='numCVsInV',
doc =
"""
Returns the number of CVs in the V direction.
:Parameters:
editableOnly : `bool`
If editableOnly evaluates to True (default), then this will return
the number of cvs that can be actually edited (and also the highest
index that may be used for v - ie, if
mySurf.numCVsInV(editableOnly=True) == 4
then allowable v indices go from
mySurf.cv[*][0] to mySurf.cv[*][3]
            If editableOnly is False, then this will return the underlying
number of cvs used to define the mathematical curve in v -
degreeV + numSpansInV.
These will only differ if the form in v is 'periodic', in which
case the editable number will be numSpansInV (as the last 'degree'
cv's are 'locked' to be the same as the first 'degree' cvs).
In all other cases, the number of cvs will be degreeV + numSpansInV.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf2', du=1, dv=3, fu='open', fv='periodic', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.cv[0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 7), label='cv')]
>>> mySurf.numCVsInV()
8
>>> mySurf.numCVsInV(editableOnly=False)
11
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf2', du=1, dv=3, fu='open', fv='open', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.cv[0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 10), label='cv')]
>>> mySurf.numCVsInV()
11
>>> mySurf.numCVsInV(editableOnly=False)
11
:rtype: `int`
""")
NurbsSurface.numEPsInU = \
NurbsSurface._numEPsFunc_generator(NurbsSurface.formInU,
NurbsSurface.numSpansInU,
name='numEPsInU',
doc =
"""
Returns the number of EPs in the U direction.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf3', du=3, dv=1, fu='periodic', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=[(4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((7, 0), label=None)]
>>> mySurf.numEPsInU()
8
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf3', du=3, dv=1, fu='open', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=[(4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((8, 0), label=None)]
>>> mySurf.numEPsInU()
9
:rtype: `int`
""")
NurbsSurface.numEPsInV = \
NurbsSurface._numEPsFunc_generator(NurbsSurface.formInV,
NurbsSurface.numSpansInV,
name='numEPsInV',
doc =
"""
Returns the number of EPs in the V direction.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf4', du=1, dv=3, fu='open', fv='periodic', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[0][:].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 7), label=None)]
>>> mySurf.numEPsInV()
8
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf4', du=1, dv=3, fu='open', fv='open', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[0][:].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 8), label=None)]
>>> mySurf.numEPsInV()
9
:rtype: `int`
""")
class Mesh(SurfaceShape):
"""
The Mesh class provides wrapped access to many API methods for querying and modifying meshes. Be aware that
modifying meshes using API commands outside of the context of a plugin is still somewhat uncharted territory,
    so proceed at your own risk.
    The component types can be accessed from the `Mesh` type (or its transform) using the names you are
familiar with from MEL:
>>> from pymel.core import *
>>> p = polySphere( name='theMoon', sa=7, sh=7 )[0]
>>> p.vtx
MeshVertex(u'theMoonShape.vtx[0:43]')
>>> p.e
MeshEdge(u'theMoonShape.e[0:90]')
>>> p.f
MeshFace(u'theMoonShape.f[0:48]')
They are also accessible from their more descriptive alternatives:
>>> p.verts
MeshVertex(u'theMoonShape.vtx[0:43]')
>>> p.edges
MeshEdge(u'theMoonShape.e[0:90]')
>>> p.faces
MeshFace(u'theMoonShape.f[0:48]')
As you'd expect, these components are all indexible:
>>> p.vtx[0]
MeshVertex(u'theMoonShape.vtx[0]')
The classes themselves contain methods for getting information about the component.
>>> p.vtx[0].connectedEdges()
MeshEdge(u'theMoonShape.e[0,6,42,77]')
This class provides support for python's extended slice notation. Typical maya ranges express a start and stop value separated
by a colon. Extended slices add a step parameter and can also represent multiple ranges separated by commas.
Thus, a single component object can represent any collection of indices.
This includes start, stop, and step values.
>>> # do every other edge between 0 and 10
>>> for edge in p.e[0:10:2]:
... print edge
...
theMoonShape.e[0]
theMoonShape.e[2]
theMoonShape.e[4]
theMoonShape.e[6]
theMoonShape.e[8]
theMoonShape.e[10]
Negative indices can be used for getting indices relative to the end:
>>> p.edges # the full range
MeshEdge(u'theMoonShape.e[0:90]')
    >>> p.edges[5:-10] # from index 5 through the 10th index from the end
MeshEdge(u'theMoonShape.e[5:80]')
Just like with python ranges, you can leave an index out, and the logical result will follow:
>>> p.edges[:-10] # from the beginning
MeshEdge(u'theMoonShape.e[0:80]')
>>> p.edges[20:]
MeshEdge(u'theMoonShape.e[20:90]')
Or maybe you want the position of every tenth vert:
>>> for x in p.vtx[::10]:
... print x, x.getPosition()
...
theMoonShape.vtx[0] [0.270522117615, -0.900968849659, -0.339223951101]
theMoonShape.vtx[10] [-0.704405844212, -0.623489797115, 0.339223951101]
theMoonShape.vtx[20] [0.974927902222, -0.222520858049, 0.0]
theMoonShape.vtx[30] [-0.704405784607, 0.623489797115, -0.339224010706]
theMoonShape.vtx[40] [0.270522087812, 0.900968849659, 0.339223980904]
To be compatible with Maya's range notation, these slices are inclusive of the stop index.
>>> # face at index 8 will be included in the sequence
>>> for f in p.f[4:8]: print f
...
theMoonShape.f[4]
theMoonShape.f[5]
theMoonShape.f[6]
theMoonShape.f[7]
theMoonShape.f[8]
>>> from pymel.core import *
>>> obj = polyTorus()[0]
>>> colors = []
>>> for i, vtx in enumerate(obj.vtx): # doctest: +SKIP
... edgs=vtx.toEdges() # doctest: +SKIP
... totalLen=0 # doctest: +SKIP
... edgCnt=0 # doctest: +SKIP
... for edg in edgs: # doctest: +SKIP
... edgCnt += 1 # doctest: +SKIP
... l = edg.getLength() # doctest: +SKIP
... totalLen += l # doctest: +SKIP
... avgLen=totalLen / edgCnt # doctest: +SKIP
... #print avgLen # doctest: +SKIP
... currColor = vtx.getColor(0) # doctest: +SKIP
... color = datatypes.Color.black # doctest: +SKIP
... # only set blue if it has not been set before
... if currColor.b<=0.0: # doctest: +SKIP
... color.b = avgLen # doctest: +SKIP
... color.r = avgLen # doctest: +SKIP
... colors.append(color) # doctest: +SKIP
"""
__metaclass__ = _factories.MetaMayaNodeWrapper
# def __init__(self, *args, **kwargs ):
# SurfaceShape.__init__(self, self._apiobject )
# self.vtx = MeshEdge(self.__apimobject__() )
_componentAttributes = {'vtx' : general.MeshVertex,
'verts' : general.MeshVertex,
'e' : general.MeshEdge,
'edges' : general.MeshEdge,
'f' : general.MeshFace,
'faces' : general.MeshFace,
'map' : general.MeshUV,
'uvs' : general.MeshUV,
'vtxFace' : general.MeshVertexFace,
'faceVerts' : general.MeshVertexFace}
# Unfortunately, objects that don't yet have any mesh data - ie, if you do
# createNode('mesh') - can't be fed into MFnMesh (even though it is a mesh
# node). This means that all the methods wrapped from MFnMesh won't be
# usable in this case. While it might make sense for some methods - ie,
# editing methods like collapseEdges - to fail in this situation, some
# basic methods like numVertices should still be usable. Therefore,
# we override some of these with the mel versions (which still work...)
numVertices = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'vertex', 'numVertices' )
numEdges = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'edge', 'numEdges' )
numFaces = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'face', 'numFaces' )
numTriangles = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'triangles', 'numTriangles' )
numSelectedTriangles = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'triangleComponent', 'numSelectedTriangles' )
numSelectedFaces = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'faceComponent', 'numSelectedFaces' )
numSelectedEdges = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'edgeComponent', 'numSelectedEdges' )
numSelectedVertices = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'vertexComponent', 'numSelectedVertices' )
area = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'area' )
worldArea = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'worldArea' )
if versions.current() >= versions.v2009:
@_factories.addApiDocs( _api.MFnMesh, 'currentUVSetName' )
def getCurrentUVSetName(self):
return self.__apimfn__().currentUVSetName( self.instanceNumber() )
@_factories.addApiDocs( _api.MFnMesh, 'currentColorSetName' )
def getCurrentColorSetName(self):
return self.__apimfn__().currentColorSetName( self.instanceNumber() )
else:
@_factories.addApiDocs( _api.MFnMesh, 'currentUVSetName' )
def getCurrentUVSetName(self):
return self.__apimfn__().currentUVSetName()
@_factories.addApiDocs( _api.MFnMesh, 'currentColorSetName' )
def getCurrentColorSetName(self):
return self.__apimfn__().currentColorSetName()
@_factories.addApiDocs( _api.MFnMesh, 'numColors' )
def numColors(self, colorSet=None):
mfn = self.__apimfn__()
# If we have an empty mesh, we will get an MFnDagNode...
if not isinstance(mfn, _api.MFnMesh):
return 0
args = []
if colorSet:
args.append(colorSet)
return mfn.numColors(*args)
# Unfortunately, objects that don't yet have any mesh data - ie, if you do
# createNode('mesh') - can't be fed into MFnMesh (even though it is a mesh
# node). This means that all the methods wrapped from MFnMesh won't be
# usable in this case. While it might make sense for some methods - ie,
# editing methods like collapseEdges - to fail in this situation, some
# basic methods like numVertices should still be usable. Therefore,
# we override some of these with the mel versions (which still work...)
def _makeApiMethodWrapForEmptyMesh(apiMethodName, baseMethodName=None,
resultName=None, defaultVal=0):
if baseMethodName is None:
baseMethodName = '_' + apiMethodName
if resultName is None:
resultName = apiMethodName
baseMethod = getattr(Mesh, baseMethodName)
@_factories.addApiDocs( _api.MFnMesh, apiMethodName )
def methodWrapForEmptyMesh(self, *args, **kwargs):
# If we have an empty mesh, we will get an MFnDagNode...
mfn = self.__apimfn__()
if not isinstance(mfn, _api.MFnMesh):
return defaultVal
return baseMethod(self, *args, **kwargs)
methodWrapForEmptyMesh.__name__ = resultName
return methodWrapForEmptyMesh
for _apiMethodName in '''numColorSets
numFaceVertices
numNormals
numUVSets
numUVs'''.split():
_wrappedFunc = _makeApiMethodWrapForEmptyMesh(_apiMethodName)
setattr(Mesh, _wrappedFunc.__name__, _wrappedFunc)
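# Illustrative sketch (not part of the original module): how the empty-mesh-safe
# wrappers above are expected to behave. Assumes a running Maya session with
# pymel initialized; the example node is created on the fly.
def _exampleEmptyMeshQueries():
    import pymel.core as pm
    emptyMesh = pm.createNode('mesh')  # a mesh node with no geometry data yet
    # The wrapped methods fall back to mel queries or a default value instead of
    # failing, because MFnMesh cannot be created for a data-less mesh node.
    return (emptyMesh.numVertices(), emptyMesh.numFaces(), emptyMesh.numUVs())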
class Subdiv(SurfaceShape):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'smp' : general.SubdVertex,
'verts' : general.SubdVertex,
'sme' : general.SubdEdge,
'edges' : general.SubdEdge,
'smf' : general.SubdFace,
'faces' : general.SubdFace,
'smm' : general.SubdUV,
'uvs' : general.SubdUV}
def getTweakedVerts(self, **kwargs):
return cmds.querySubdiv( action=1, **kwargs )
def getSharpenedVerts(self, **kwargs):
return cmds.querySubdiv( action=2, **kwargs )
def getSharpenedEdges(self, **kwargs):
return cmds.querySubdiv( action=3, **kwargs )
def getEdges(self, **kwargs):
return cmds.querySubdiv( action=4, **kwargs )
def cleanTopology(self):
cmds.subdCleanTopology(self)
class Lattice(ControlPoint):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'pt' : general.LatticePoint,
'points': general.LatticePoint}
class Particle(DeformableShape):
__apicls__ = _api.MFnParticleSystem
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'pt' : general.ParticleComponent,
'points': general.ParticleComponent}
# for backwards compatibility
Point = general.ParticleComponent
# for backwards compatibility, keep these two, even though the api wrap
# will also provide 'count'
def pointCount(self):
return cmds.particle( self, q=1,count=1)
num = pointCount
class SelectionSet( _api.MSelectionList):
apicls = _api.MSelectionList
__metaclass__ = _factories.MetaMayaTypeWrapper
def __init__(self, objs):
""" can be initialized from a list of objects, another SelectionSet, an MSelectionList, or an ObjectSet"""
if isinstance(objs, _api.MSelectionList ):
_api.MSelectionList.__init__(self, objs)
elif isinstance(objs, ObjectSet ):
_api.MSelectionList.__init__(self, objs.asSelectionSet() )
else:
_api.MSelectionList.__init__(self)
for obj in objs:
if isinstance(obj, (DependNode, DagNode) ):
self.apicls.add( self, obj.__apiobject__() )
elif isinstance(obj, general.Attribute):
self.apicls.add( self, obj.__apiobject__(), True )
# elif isinstance(obj, Component):
# sel.add( obj.__apiobject__(), True )
elif isinstance( obj, basestring ):
self.apicls.add( self, obj )
else:
raise TypeError
def __melobject__(self):
# If the list contains components, THEIR __melobject__ is a list -
# so need to iterate through, and flatten if needed
melList = []
for selItem in self:
selItem = selItem.__melobject__()
if _util.isIterable(selItem):
melList.extend(selItem)
else:
melList.append(selItem)
return melList
def __len__(self):
""":rtype: `int` """
return self.apicls.length(self)
def __contains__(self, item):
""":rtype: `bool` """
if isinstance(item, (DependNode, DagNode, general.Attribute) ):
return self.apicls.hasItem(self, item.__apiobject__())
elif isinstance(item, general.Component):
raise NotImplementedError, 'Components not yet supported'
else:
return self.apicls.hasItem(self, general.PyNode(item).__apiobject__())
def __repr__(self):
""":rtype: `str` """
names = []
self.apicls.getSelectionStrings( self, names )
return 'nt.%s(%s)' % ( self.__class__.__name__, names )
def __getitem__(self, index):
""":rtype: `PyNode` """
if index >= len(self):
raise IndexError, "index out of range"
plug = _api.MPlug()
obj = _api.MObject()
dag = _api.MDagPath()
comp = _api.MObject()
# Go from most specific to least - plug, dagPath, dependNode
try:
self.apicls.getPlug( self, index, plug )
assert not plug.isNull()
except (RuntimeError, AssertionError):
try:
self.apicls.getDagPath( self, index, dag, comp )
except RuntimeError:
try:
self.apicls.getDependNode( self, index, obj )
return general.PyNode( obj )
except:
pass
else:
if comp.isNull():
return general.PyNode( dag )
else:
return general.PyNode( dag, comp )
else:
return general.PyNode( plug )
def __setitem__(self, index, item):
if isinstance(item, (DependNode, DagNode, general.Attribute) ):
return self.apicls.replace(self, index, item.__apiobject__())
elif isinstance(item, general.Component):
raise NotImplementedError, 'Components not yet supported'
else:
            return self.apicls.replace(self, index, general.PyNode(item).__apiobject__())
def __and__(self, s):
"operator for `SelectionSet.getIntersection`"
return self.getIntersection(s)
def __iand__(self, s):
"operator for `SelectionSet.intersection`"
return self.intersection(s)
def __or__(self, s):
"operator for `SelectionSet.getUnion`"
return self.getUnion(s)
def __ior__(self, s):
"operator for `SelectionSet.union`"
return self.union(s)
def __lt__(self, s):
"operator for `SelectionSet.isSubSet`"
return self.isSubSet(s)
def __gt__(self, s):
"operator for `SelectionSet.isSuperSet`"
return self.isSuperSet(s)
def __sub__(self, s):
"operator for `SelectionSet.getDifference`"
return self.getDifference(s)
def __isub__(self, s):
"operator for `SelectionSet.difference`"
return self.difference(s)
def __xor__(self, s):
"operator for `SelectionSet.symmetricDifference`"
return self.getSymmetricDifference(s)
def __ixor__(self, s):
"operator for `SelectionSet.symmetricDifference`"
return self.symmetricDifference(s)
def add(self, item):
if isinstance(item, (DependNode, DagNode, general.Attribute) ):
return self.apicls.add(self, item.__apiobject__())
elif isinstance(item, general.Component):
raise NotImplementedError, 'Components not yet supported'
else:
return self.apicls.add(self, general.PyNode(item).__apiobject__())
def pop(self, index):
""":rtype: `PyNode` """
if index >= len(self):
raise IndexError, "index out of range"
return self.apicls.remove(self, index )
def isSubSet(self, other):
""":rtype: `bool`"""
if isinstance(other, ObjectSet):
other = other.asSelectionSet()
return set(self).issubset(other)
def isSuperSet(self, other, flatten=True ):
""":rtype: `bool`"""
if isinstance(other, ObjectSet):
other = other.asSelectionSet()
return set(self).issuperset(other)
def getIntersection(self, other):
""":rtype: `SelectionSet`"""
# diff = self-other
# intersect = self-diff
diff = self.getDifference(other)
return self.getDifference(diff)
def intersection(self, other):
diff = self.getDifference(other)
self.difference(diff)
def getDifference(self, other):
""":rtype: `SelectionSet`"""
# create a new SelectionSet so that we don't modify our current one
newSet = SelectionSet( self )
newSet.difference(other)
return newSet
def difference(self, other):
if not isinstance( other, _api.MSelectionList ):
other = SelectionSet( other )
self.apicls.merge( self, other, _api.MSelectionList.kRemoveFromList )
def getUnion(self, other):
""":rtype: `SelectionSet`"""
newSet = SelectionSet( self )
newSet.union(other)
return newSet
def union(self, other):
if not isinstance( other, _api.MSelectionList ):
other = SelectionSet( other )
self.apicls.merge( self, other, _api.MSelectionList.kMergeNormal )
def getSymmetricDifference(self, other):
"""
Also known as XOR
:rtype: `SelectionSet`
"""
# create a new SelectionSet so that we don't modify our current one
newSet = SelectionSet( self )
newSet.symmetricDifference(other)
return newSet
def symmetricDifference(self, other):
if not isinstance( other, _api.MSelectionList ):
other = SelectionSet( other )
# FIXME: does kXOR exist? completion says only kXORWithList exists
self.apicls.merge( self, other, _api.MSelectionList.kXOR )
def asObjectSet(self):
return general.sets( self )
# def intersect(self, other):
# self.apicls.merge( other, _api.MSelectionList.kXORWithList )
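# Illustrative sketch (not from the original source): basic SelectionSet set
# arithmetic using the operators defined above. Assumes a Maya session; the
# default camera shapes are used purely as convenient example nodes.
def _exampleSelectionSetOps():
    camsA = SelectionSet(['perspShape', 'topShape'])
    camsB = SelectionSet(['perspShape', 'sideShape'])
    both = camsA & camsB    # getIntersection -> SelectionSet([u'perspShape'])
    either = camsA | camsB  # getUnion
    onlyA = camsA - camsB   # getDifference -> SelectionSet([u'topShape'])
    return both, either, onlyA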
class ObjectSet(Entity):
"""
    The ObjectSet class and `SelectionSet` class work together. Both classes have a very similar interface;
    the primary difference is that the ObjectSet class represents connections to an objectSet node, while the
    `SelectionSet` class is a generic set, akin to python's built-in `set`.
create some sets:
>>> from pymel.core import *
>>> f=newFile(f=1) #start clean
>>>
>>> s = sets() # create an empty set
>>> s.union( ls( type='camera') ) # add some cameras to it
>>> s.members() # doctest: +SKIP
[nt.Camera(u'sideShape'), nt.Camera(u'frontShape'), nt.Camera(u'topShape'), nt.Camera(u'perspShape')]
>>> sel = s.asSelectionSet() # or as a SelectionSet
>>> sel # doctest: +SKIP
nt.SelectionSet([u'sideShape', u'frontShape', u'topShape', u'perspShape'])
>>> sorted(sel) # as a sorted list
[nt.Camera(u'frontShape'), nt.Camera(u'perspShape'), nt.Camera(u'sideShape'), nt.Camera(u'topShape')]
Operations between sets result in `SelectionSet` objects:
>>> t = sets() # create another set
>>> t.add( 'perspShape' ) # add the persp camera shape to it
>>> s.getIntersection(t)
nt.SelectionSet([u'perspShape'])
>>> diff = s.getDifference(t)
>>> diff #doctest: +SKIP
nt.SelectionSet([u'sideShape', u'frontShape', u'topShape'])
>>> sorted(diff)
[nt.Camera(u'frontShape'), nt.Camera(u'sideShape'), nt.Camera(u'topShape')]
>>> s.isSuperSet(t)
True
"""
# >>> u = sets( s&t ) # intersection
# >>> print u.elements(), s.elements()
# >>> if u < s: print "%s is a sub-set of %s" % (u, s)
#
# place a set inside another, take1
#
# >>> # like python's built-in set, the add command expects a single element
# >>> s.add( t )
#
# place a set inside another, take2
#
# >>> # like python's built-in set, the update command expects a set or a list
# >>> t.update([u])
#
# >>> # put the sets back where they were
# >>> s.remove(t)
# >>> t.remove(u)
#
# now put the **contents** of a set into another set
#
# >>> t.update(u)
#
# mixed operation between pymel.core.ObjectSet and built-in set
#
# >>> v = set(['polyCube3', 'pSphere3'])
# >>> print s.intersection(v)
# >>> print v.intersection(s) # not supported yet
# >>> u.clear()
#
# >>> delete( s )
# >>> delete( t )
# >>> delete( u )
#
#
# these will return the results of the operation as python sets containing lists of pymel node classes::
#
# s&t # s.intersection(t)
# s|t # s.union(t)
# s^t # s.symmetric_difference(t)
# s-t # s.difference(t)
#
# the following will alter the contents of the maya set::
#
# s&=t # s.intersection_update(t)
# s|=t # s.update(t)
# s^=t # s.symmetric_difference_update(t)
# s-=t # s.difference_update(t)
#
# def _elements(self):
# """ used internally to get a list of elements without casting to node classes"""
# return sets( self, q=True)
# #-----------------------
# # Maya Methods
# #-----------------------
__metaclass__ = _factories.MetaMayaNodeWrapper
#-----------------------
# Python ObjectSet Methods
#-----------------------
@classmethod
def _getApiObjs(cls, item, tryCast=True):
"""
Returns a tuple of api objects suitable (after unpacking) for
feeding to most of the MFnSet methods (ie, remove, isMember, etc)
"""
if isinstance(item, DagNode):
return ( item.__apimdagpath__(), _api.MObject() )
elif isinstance(item, (DependNode, general.Attribute) ):
return ( item.__apiobject__(), )
elif isinstance(item, general.Component):
return ( item.__apimdagpath__(), item.__apimobject__() )
elif tryCast:
return cls._getApiObjs(general.PyNode(item), tryCast=False)
else:
raise TypeError(item)
def __contains__(self, item):
""":rtype: `bool` """
return self.__apimfn__().isMember(*self._getApiObjs(item))
def __getitem__(self, index):
return self.asSelectionSet()[index]
def __len__(self):
""":rtype: `int`"""
return cmds.sets(self, q=1, size=1)
#def __eq__(self, s):
# return s == self._elements()
#def __ne__(self, s):
# return s != self._elements()
def __and__(self, s):
"operator for `ObjectSet.getIntersection`"
return self.getIntersection(s)
def __iand__(self, s):
"operator for `ObjectSet.intersection`"
return self.intersection(s)
def __or__(self, s):
"operator for `ObjectSet.getUnion`"
return self.getUnion(s)
def __ior__(self, s):
"operator for `ObjectSet.union`"
return self.union(s)
# def __lt__(self, s):
# "operator for `ObjectSet.isSubSet`"
# return self.isSubSet(s)
#
# def __gt__(self, s):
# "operator for `ObjectSet.isSuperSet`"
# return self.isSuperSet(s)
def __sub__(self, s):
"operator for `ObjectSet.getDifference`"
return self.getDifference(s)
def __isub__(self, s):
"operator for `ObjectSet.difference`"
return self.difference(s)
def __xor__(self, s):
"operator for `ObjectSet.symmetricDifference`"
return self.getSymmetricDifference(s)
def __ixor__(self, s):
"operator for `ObjectSet.symmetricDifference`"
return self.symmetricDifference(s)
#
# def subtract(self, set2):
# return sets( self, subtract=set2 )
#
# def add(self, element):
# return sets( self, add=[element] )
#
# def clear(self):
# return sets( self, clear=True )
#
# def copy(self ):
# return sets( self, copy=True )
#
# def difference(self, elements):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return list(set(self.elements()).difference(elements))
#
# '''
# if isinstance(s, ObjectSet) or isinstance(s, str):
# return sets( s, subtract=self )
#
# s = sets( s )
# res = sets( s, subtract=self )
# cmds.delete(s)
# return res'''
#
# def difference_update(self, elements ):
# return sets( self, remove=elements)
#
# def discard( self, element ):
# try:
# return self.remove(element)
# except TypeError:
# pass
#
# def intersection(self, elements):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return set(self.elements()).intersection(elements)
#
# def intersection_update(self, elements):
# self.clear()
# sets( self, add=self.intersections(elements) )
#
#
# def remove( self, element ):
# return sets( self, remove=[element])
#
# def symmetric_difference(self, elements):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return set(self.elements()).symmetric_difference(elements)
#
# def union( self, elements ):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return set(self.elements()).union(elements)
#
# def update( self, set2 ):
# sets( self, forceElement=set2 )
def members(self, flatten=False):
"""return members as a list
:rtype: `list`
"""
return list( self.asSelectionSet(flatten) )
@_warnings.deprecated( 'Use ObjectSet.members instead', 'ObjectSet' )
def elements(self, flatten=False):
"""return members as a list
:rtype: `list`
"""
return list( self.asSelectionSet(flatten) )
def flattened(self):
"""return a flattened list of members. equivalent to `ObjectSet.members(flatten=True)`
:rtype: `list`
"""
return self.members(flatten=True)
def resetTo(self, newContents ):
"""clear and set the members to the passed list/set"""
self.clear()
self.addMembers( newContents )
def add(self, item):
return self.__apimfn__().addMember(*self._getApiObjs(item))
def remove(self, item):
try:
return self.__apimfn__().removeMember(*self._getApiObjs(item))
except RuntimeError:
# Provide a more informative error if object is not in set
if item not in self:
try:
itemStr = repr(item)
except Exception:
itemStr = 'item'
raise ValueError("%s not in set %r" % (itemStr, self))
else:
raise
def isSubSet(self, other):
""":rtype: `bool`"""
return self.asSelectionSet().isSubSet(other)
def isSuperSet(self, other ):
""":rtype: `bool`"""
return self.asSelectionSet().isSuperSet(other)
def isEqual(self, other ):
"""
do not use __eq__ to test equality of set contents. __eq__ will only tell you if
the passed object is the same node, not if this set and the passed set
have the same contents.
:rtype: `bool`
"""
return self.asSelectionSet() == SelectionSet(other)
def getDifference(self, other):
""":rtype: `SelectionSet`"""
sel = self.asSelectionSet()
sel.difference(other)
return sel
def difference(self, other):
sel = self.getDifference(other)
self.resetTo(sel)
def getSymmetricDifference(self, other):
"""also known as XOR
:rtype: `SelectionSet`
"""
        sel = self.asSelectionSet()
        sel.symmetricDifference(other)
return sel
def symmetricDifference(self, other):
        sel = self.getSymmetricDifference(other)
self.resetTo(sel)
def getIntersection(self, other):
""":rtype: `SelectionSet`"""
if isinstance(other, ObjectSet):
return self._getIntersection(other)
#elif isinstance(other, SelectionSet) or hasattr(other, '__iter__'):
selSet = self.asSelectionSet()
selSet.intersection(other)
return selSet
#raise TypeError, 'Cannot perform intersection with non-iterable type %s' % type(other)
def intersection(self, other):
sel = self.getIntersection(other)
self.resetTo(sel)
def getUnion(self, other):
""":rtype: `SelectionSet`"""
if isinstance(other, ObjectSet):
return self._getUnion(other)
selSet = self.asSelectionSet()
selSet.union(other)
return selSet
def union(self, other):
self.addMembers(other)
def isRenderable(self):
'''Mimics cmds.sets(self, q=True, renderable=True).
Alternatively you can use isinstance(someset, pm.nt.ShadingEngine)
since shadingEngine is the only renderable set in maya now
'''
return bool(cmds.sets(self, q=True, r=True))
class ShadingEngine(ObjectSet):
@classmethod
def _getApiObjs(cls, item, tryCast=True):
# Since shading groups can't contain transforms, as a convenience,
# use getShape on any transforms
if isinstance(item, Transform):
shape = item.getShape()
if shape:
return cls._getApiObjs(shape)
else:
try:
itemStr = repr(item)
except Exception:
itemStr = 'item'
raise TypeError("%s has no shape, and %s objects cannot contain Transforms" % (itemStr, cls.__name__))
else:
return super(ShadingEngine, cls)._getApiObjs(item, tryCast=tryCast)
class AnimLayer(ObjectSet):
__metaclass__ = _factories.MetaMayaNodeWrapper
def getAttribute(self):
'''Retrieve the attributes animated on this AnimLayer
'''
# Unfortunately, cmds.animLayer('MyAnimLayer', q=1, attribute=1)
        # returns non-unique attribute names, ie,
# MyNode.myAttr
# even if there are foo|MyNode and bar|MyNode in the scene, and there
# doesn't seem to be a flag to tell it to give unique / full paths.
        # Therefore, query it ourselves, by getting inputs to dagSetMembers.
# Testing has shown that animLayers only use dagSetMembers, and never
# dnSetMembers - if you add a non-dag node to an animLayer, it makes
# a connection to dagSetMembers; and even if you manually make a connection
# to dnSetMembers, those connections don't seem to show up in
# animLayer(q=1, attribute=1)
return self.attr('dagSetMembers').inputs(plugs=1)
getAttributes = getAttribute
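# Illustrative sketch (not part of the original module): the difference between
# querying an anim layer via cmds and via AnimLayer.getAttribute. Assumes a Maya
# session; the layer and node names are hypothetical.
def _exampleAnimLayerAttributes():
    import pymel.core as pm
    layer = pm.animLayer('exampleLayer')
    pm.animLayer(layer, edit=True, attribute='pSphere1.translateX')
    # cmds.animLayer(layer, q=1, attribute=1) returns short, possibly ambiguous
    # names; getAttribute returns unique Attribute PyNodes resolved from the
    # dagSetMembers connections.
    return layer.getAttribute()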
class AnimCurve(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
def addKeys(self,time,values,tangentInType='linear',tangentOutType='linear',unit=None):
if not unit:
unit = _api.MTime.uiUnit()
times = _api.MTimeArray()
for frame in time: times.append(_api.MTime(frame,unit))
keys = _api.MDoubleArray()
for value in values: keys.append(value)
return self.__apimfn__().addKeys( times, keys,
_factories.apiClassInfo['MFnAnimCurve']['enums']['TangentType']['values'].getIndex('kTangent'+tangentInType.capitalize()),
_factories.apiClassInfo['MFnAnimCurve']['enums']['TangentType']['values'].getIndex('kTangent'+tangentOutType.capitalize()))
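# Illustrative sketch (not from the original source): adding keyframes in bulk
# with AnimCurve.addKeys. Assumes a Maya session; the curve is created on the fly.
def _exampleAddKeys():
    import pymel.core as pm
    curve = pm.createNode('animCurveTL')  # a time -> distance anim curve
    curve.addKeys([1, 10, 20], [0.0, 5.0, 2.5],
                  tangentInType='linear', tangentOutType='linear')
    return curve.numKeys()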
class GeometryFilter(DependNode): pass
class SkinCluster(GeometryFilter):
__metaclass__ = _factories.MetaMayaNodeWrapper
def getWeights(self, geometry, influenceIndex=None):
if not isinstance(geometry, general.PyNode):
geometry = general.PyNode(geometry)
if isinstance( geometry, Transform ):
try:
geometry = geometry.getShape()
except:
raise TypeError, "%s is a transform with no shape" % geometry
if isinstance(geometry, GeometryShape):
components = _api.toComponentMObject( geometry.__apimdagpath__() )
elif isinstance(geometry, general.Component):
components = geometry.__apiobject__()
else:
raise TypeError
if influenceIndex is not None:
weights = _api.MDoubleArray()
self.__apimfn__().getWeights( geometry.__apimdagpath__(), components, influenceIndex, weights )
return iter(weights)
else:
weights = _api.MDoubleArray()
index = _api.SafeApiPtr('uint')
self.__apimfn__().getWeights( geometry.__apimdagpath__(), components, weights, index() )
index = index.get()
args = [iter(weights)] * index
return itertools.izip(*args)
    def setWeights(self, geometry, influences, weights, normalize=True):
if not isinstance(geometry, general.PyNode):
geometry = general.PyNode(geometry)
if isinstance( geometry, Transform ):
try:
geometry = geometry.getShape()
except:
raise TypeError, "%s is a transform with no shape" % geometry
if isinstance(geometry, GeometryShape):
components = _api.toComponentMObject( geometry.__apimdagpath__() )
elif isinstance(geometry, general.Component):
components = geometry.__apiobject__()
else:
raise TypeError
        if not isinstance(influences, _api.MIntArray):
            api_influences = _api.MIntArray()
            for influence in influences:
                api_influences.append(influence)
            influences = api_influences
if not isinstance(weights,_api.MDoubleArray):
api_weights = _api.MDoubleArray()
for weight in weights:
api_weights.append(weight)
weights = api_weights
old_weights = _api.MDoubleArray()
su = _api.MScriptUtil()
su.createFromInt(0)
index = su.asUintPtr()
self.__apimfn__().getWeights( geometry.__apimdagpath__(), components, old_weights, index )
        return self.__apimfn__().setWeights( geometry.__apimdagpath__(), components, influences, weights, normalize, old_weights )
@_factories.addApiDocs( _api.MFnSkinCluster, 'influenceObjects' )
def influenceObjects(self):
return self._influenceObjects()[1]
def numInfluenceObjects(self):
return self._influenceObjects()[0]
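# Illustrative sketch (not part of the original module): reading and writing skin
# weights. Assumes a Maya scene containing a skinCluster named 'skinCluster1'
# deforming 'pCylinderShape1'; both names are hypothetical.
def _exampleSkinWeights():
    import pymel.core as pm
    skin = pm.PyNode('skinCluster1')
    geo = pm.PyNode('pCylinderShape1')
    influences = skin.influenceObjects()
    # getWeights yields one tuple per vertex with one weight per influence.
    weights = list(skin.getWeights(geo))
    # Write the same weights back (flattened vertex-major), normalizing them.
    flat = [w for vertexWeights in weights for w in vertexWeights]
    skin.setWeights(geo, range(len(influences)), flat, normalize=True)
    return len(weights), len(influences)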
# TODO: if nucleus/symmetryConstraint bug ever fixed:
# - remove entry in apiCache.ApiCache.API_TO_MFN_OVERRIDES
# - remove hard-code setting of Nucleus's parent to DependNode
# - remove 2 checks in allapi.toApiObject for objects which can have an MDagPath
# but can't use MFnDagNode
if _apicache.NUCLEUS_MFNDAG_BUG:
# nucleus has a weird bug where, even though it inherits from transform, and
# can be parented in the dag, etc, you can't create an MFnTransform or
    # MFnDagNode for it... therefore, hardcode its PyNode to inherit from
# DependNode
class Nucleus(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
if _apicache.SYMMETRY_CONSTRAINT_MFNDAG_BUG:
class SymmetryConstraint(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
# TODO: if hikHandle bug ever fixed:
# - remove entry in apiCache.ApiCache.API_TO_MFN_OVERRIDES
# - remove hard-code setting of HikHandle's parent to Transform
class HikHandle(Transform):
__metaclass__ = _factories.MetaMayaNodeWrapper
class JointFfd(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
class TransferAttributes(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
_factories.ApiTypeRegister.register( 'MSelectionList', SelectionSet )
def _createPyNodes():
dynModule = _util.LazyLoadModule(__name__, globals())
for mayaType, parents, children in _factories.nodeHierarchy:
if mayaType == 'dependNode':
# This seems like the more 'correct' way of doing it - only node types
# that are currently available have PyNodes created for them - but
# changing it so some PyNodes are no longer available until their
# plugin is loaded may create backwards incompatibility issues...
# if (mayaType == 'dependNode'
# or mayaType not in _factories.mayaTypesToApiTypes):
continue
parentMayaType = parents[0]
#print "superNodeType: ", superNodeType, type(superNodeType)
if parentMayaType is None:
_logger.warning("could not find parent node: %s", mayaType)
continue
#className = _util.capitalize(mayaType)
#if className not in __all__: __all__.append( className )
if _factories.isMayaType(mayaType):
_factories.addPyNode( dynModule, mayaType, parentMayaType )
sys.modules[__name__] = dynModule
# Initialize Pymel classes to API types lookup
#_startTime = time.time()
_createPyNodes()
#_logger.debug( "Initialized Pymel PyNodes types list in %.2f sec" % time.time() - _startTime )
dynModule = sys.modules[__name__]
#def listToMSelection( objs ):
# sel = _api.MSelectionList()
# for obj in objs:
# if isinstance(obj, DependNode):
# sel.add( obj.__apiobject__() )
# elif isinstance(obj, Attribute):
# sel.add( obj.__apiobject__(), True )
# elif isinstance(obj, Component):
# pass
# #sel.add( obj.__apiobject__(), True )
# else:
# raise TypeError
| bsd-3-clause | 638,128,032,992,870,800 | 36.748704 | 531 | 0.560318 | false |
google-research/google-research | optimizing_interpretability/imagenet/utils.py | 1 | 6411 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for training."""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
def add_noise(input_image, noise, multiple_image_std, size=224):
"""Transformation of a single image by adding noise.
  If a random gaussian noise distribution is specified (noise='r_normal'),
  the standard deviation of the added noise is based on the dynamic range of
  the image, weighted by the multiple_image_std argument. This appears to work
  well empirically and is the subject of additional research.
Args:
input_image: A single input image, float32 tensor
noise: String that specifies the distribution of noise to add as either a
gaussian distribution (r_normal) or a uniform distribution (r_uniform).
multiple_image_std: Weight to place on the range of input values.
size: size of noise matrix (should match image size)
Returns:
noisy_image: The input with the addition of a noise distribution.
Raises:
ValueError: Raised if the string specifying the noise distribution does
not correspond to the noise implementations.
"""
if noise == 'r_normal':
image_min = tf.reduce_min(input_image)
image_max = tf.reduce_max(input_image)
diff = tf.reduce_mean(tf.subtract(image_max, image_min))
range_ = tf.to_float(tf.multiply(tf.constant([multiple_image_std]), diff))
noise = tf.random_normal(
shape=[size, size, 3], stddev=range_, dtype=tf.float32)
elif noise == 'r_uniform':
percentile_ = tfp.stats.percentile(input_image, q=10.)
noise = tf.random.uniform(
minval=-percentile_,
maxval=percentile_,
shape=[size, size, 3],
dtype=tf.float32)
else:
raise ValueError('Noise type not found:', noise)
noisy_image = tf.add(input_image, noise)
return noisy_image
def noise_layer(images,
labels,
multiple_image_std=0.15,
size=224,
jitter_multiplier=1,
noise='r_normal'):
"""Add noise to a subset of images in a batch.
Args:
images: The batch of images.
labels: Labels associated with images.
multiple_image_std: Weight to place on the range of input values.
size: The size of the image.
    jitter_multiplier: number of noisy copies to create for each image.
noise: String that specifies the distribution of noise to add.
Returns:
noisy_images: A set of images (num_images*jitter_multiplier) with injected
noise.
tiled_labels: Associated labels for the noisy images.
"""
images_noise = tf.tile(
images, multiples=tf.constant([jitter_multiplier, 1, 1, 1], shape=[
4,
]))
noisy_images = tf.map_fn(
lambda x: add_noise(x, noise, multiple_image_std, size), images_noise)
noisy_images = tf.concat([images, noisy_images], axis=0)
tiled_labels = tf.tile(labels, tf.constant([jitter_multiplier], shape=[1]))
tiled_labels = tf.concat([labels, tiled_labels], axis=0)
return noisy_images, tiled_labels
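# Illustrative usage sketch (not part of the original file). Assumes a TF1-style
# graph; the batch contents below are made up for demonstration.
def _example_noise_layer():
  images = tf.zeros([4, 224, 224, 3], dtype=tf.float32)
  labels = tf.constant([0, 1, 2, 3], dtype=tf.int64)
  noisy_images, tiled_labels = noise_layer(
      images, labels, multiple_image_std=0.15, size=224,
      jitter_multiplier=2, noise='r_normal')
  # The original 4 images plus 4 * 2 noisy copies give a batch of 12, with
  # labels tiled to match.
  return noisy_images, tiled_labels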
def format_tensors(*dicts):
"""Formats metrics to be callable as tf.summary scalars on tpu's.
Args:
*dicts: A set of metric dictionaries, containing metric name + value tensor.
Returns:
A single formatted dictionary that holds all tensors.
Raises:
ValueError: if any tensor is not a scalar.
"""
merged_summaries = {}
for d in dicts:
for metric_name, value in d.items():
shape = value.shape.as_list()
if not shape:
merged_summaries[metric_name] = tf.expand_dims(value, axis=0)
elif shape == [1]:
merged_summaries[metric_name] = value
else:
raise ValueError(
            'Metric {} has value {} that is not reconcilable'.format(
metric_name, value))
return merged_summaries
def host_call_fn(model_dir, **kwargs):
"""creates training summaries when using TPU.
Args:
model_dir: String indicating the output_dir to save summaries in.
**kwargs: Set of metric names and tensor values for all desired summaries.
Returns:
Summary op to be passed to the host_call arg of the estimator function.
"""
gs = kwargs.pop('global_step')[0]
  with tf.contrib.summary.create_file_writer(model_dir).as_default():
    with tf.contrib.summary.always_record_summaries():
      for name, tensor in kwargs.items():
        tf.contrib.summary.scalar(name, tensor[0], step=gs)
return tf.contrib.summary.all_summary_ops()
def get_lr_schedule(train_steps, num_train_images, train_batch_size):
"""learning rate schedule."""
steps_per_epoch = np.floor(num_train_images / train_batch_size)
train_epochs = train_steps / steps_per_epoch
return [ # (multiplier, epoch to start) tuples
(1.0, np.floor(5 / 90 * train_epochs)),
(0.1, np.floor(30 / 90 * train_epochs)),
(0.01, np.floor(60 / 90 * train_epochs)),
(0.001, np.floor(80 / 90 * train_epochs))
]
def learning_rate_schedule(params, current_epoch, train_batch_size,
num_train_images):
"""Handles linear scaling rule, gradual warmup, and LR decay.
Args:
params: Python dict containing parameters for this run.
current_epoch: `Tensor` for current epoch.
train_batch_size: batch size adjusted for PIE
num_train_images: total number of train images
Returns:
A scaled `Tensor` for current learning rate.
"""
scaled_lr = params['base_learning_rate'] * (train_batch_size / 256.0)
lr_schedule = get_lr_schedule(
train_steps=params['train_steps'],
num_train_images=num_train_images,
train_batch_size=train_batch_size)
decay_rate = (
scaled_lr * lr_schedule[0][0] * current_epoch / lr_schedule[0][1])
for mult, start_epoch in lr_schedule:
decay_rate = tf.where(current_epoch < start_epoch, decay_rate,
scaled_lr * mult)
return decay_rate
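# Illustrative sketch (not part of the original file): evaluating the schedule for
# a hypothetical 90-epoch ImageNet run (1,281,167 images, batch size 256, so about
# 450,360 steps). The result warms up linearly over the first 5 epochs and then
# decays 10x at epochs 30, 60 and 80.
def _example_learning_rate_schedule():
  params = {'base_learning_rate': 0.1, 'train_steps': 450360}
  current_epoch = tf.constant(31.0)
  return learning_rate_schedule(params, current_epoch,
                                train_batch_size=256,
                                num_train_images=1281167)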
| apache-2.0 | 4,932,138,354,840,355,000 | 33.842391 | 80 | 0.676182 | false |
lorddex/linux_tools | virtual_testbeds/add_hosts.py | 1 | 2360 | #!/usr/bin/python
# script that adds a VM that requests an IP address via dhcpd to the local hosts file
import sys
import subprocess
import string
import time
debug_file="/var/log/add_hosts.log"
def debug(message):
message = time.strftime("%d %b %Y %H:%M:%S") + " " + message
print message
fd = open(debug_file, "a")
fd.write(message + "\n")
fd.close()
text=""
for arg in sys.argv:
text = text +" "+arg
debug(text)
action=sys.argv[1]
ip=sys.argv[3]
mac=sys.argv[2]
hosts="/etc/hosts"
# if del action is called exit from this script
if action == "del":
# fd=open(hosts, "r")
# hosts_lines=fd.readlines()
# fd.close()
# fd=open(hosts, "w")
# for line in hosts_lines:
# if ip not in line:
# fd.write(line)
# debug( "Ok, %s deleted from %s file" % (name, hosts))
sys.exit(0)
# add address to local hosts file
#command = ["/bin/ps", "-eo", "command"]
#process = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=9192)
if len(sys.argv) == 5:
name = sys.argv[4]
debug("host name from parameters: "+name)
else:
command = "ps axo pid,command | grep /usr/bin/kvm"
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
found = None
for line in process.stdout.readlines():
pid=line.split(" ")
pid = pid[0]
fd_c = open("/proc/"+pid+"/cmdline", "r")
lines=fd_c.readlines()
fd_c.close()
if len(lines)>0:
line=lines[0]
line=string.replace(line, "-", " -")
line=string.replace(line, "\x00", " ")
else:
continue
if mac in line and "add_host" not in line:
found = line
break
if found is None:
debug("Ops, no VM with %s found" % mac)
sys.exit(1)
parms = found.split(" -")[1:]
name=False
for par in parms:
if par.strip().startswith("name"):
name = par.strip().split(" ")[1]
if name is False:
debug("Ops, VM name not found")
sys.exit(2)
fd=open(hosts, "r")
hosts_lines=fd.readlines()
fd.close()
already=False
for line in hosts_lines:
if name in line:
already=line
break
change=False
if already is not False:
if ip in line:
debug("Ok, VM already in hosts file")
sys.exit(0)
else:
change=True
if change is False:
fd=open(hosts, "a")
fd.write(ip + "\t\t" + name +"\n")
else:
fd=open(hosts, "w")
for line in hosts_lines:
if name in line:
line = ip + "\t\t" + name + "\n"
fd.write(line)
fd.close()
debug( "Ok, %s added to %s file" % (name, hosts))
| mit | 8,233,084,119,662,574,000 | 20.454545 | 86 | 0.642373 | false |
nioo-knaw/hydra | conf.py | 1 | 7810 | import logging
import multiprocessing
import re
import os
import tempfile
import yaml
import sys
from collections import OrderedDict
import click
import urllib
# Adapted from: https://github.com/pnnl/atlas/blob/master/atlas/conf.py
logging.basicConfig(level=logging.INFO, datefmt="%Y-%m-%d %H:%M", format="[%(asctime)s %(levelname)s] %(message)s")
host = "ftp.sra.ebi.ac.uk"
project = "PRJEB14409"
#project = "PRJNA319605"
# http://stackoverflow.com/a/3675423
def replace_last(source_string, replace_what, replace_with):
head, _sep, tail = source_string.rpartition(replace_what)
if _sep == '':
return tail
else:
return head + replace_with + tail
def get_ena(project):
from urllib import request
samples = ""
try:
samples = request.urlopen("http://www.ebi.ac.uk/ena/data/warehouse/filereport?accession=%s&result=read_run&fields=fastq_ftp" % project).readlines()[1:]
except urllib.error.HTTPError:
print("Not a valid ENA project")
for sample in samples:
for fastq in sample.strip().split(b';'):
dirpath = os.path.dirname(fastq).decode("utf-8")
filename = os.path.basename(fastq).decode("utf-8")
yield (dirpath,"",[filename])
def get_sample_files(path, remote):
samples = OrderedDict()
seen = set()
walker = ""
if remote != None:
walker = get_ena(remote)
else:
walker = os.walk(path, followlinks=True)
for dir_name, sub_dirs, files in walker:
for fname in files:
if ".fastq" in fname or ".fq" in fname:
sample_id = fname.partition(".fastq")[0]
if ".fq" in sample_id:
sample_id = fname.partition(".fq")[0].replace("_","-")
sample_id = sample_id.replace("_R1", "").replace("_r1", "").replace("_R2", "").replace("_r2", "")
sample_id = re.sub("_1$", "", sample_id)
sample_id = re.sub("_2$", "", sample_id)
sample_id = sample_id.replace("_", "-").replace(" ", "-")
fq_path = os.path.join(dir_name, fname)
fastq_paths = [fq_path]
if fq_path in seen: continue
if "_R1" in fname or "_r1" in fname or "_1" in fname:
fname = replace_last(fname,"_1.","_2.")
r2_path = os.path.join(dir_name, fname.replace("_R1", "_R2").replace("_r1", "_r2"))
if not r2_path == fq_path:
seen.add(r2_path)
fastq_paths.append(r2_path)
if "_R2" in fname or "_r2" in fname or "_2" in fname:
fname = replace_last(fname,"_2.","_1.")
r1_path = os.path.join(dir_name, fname.replace("_R2", "_R1").replace("_r2", "_r1"))
if not r1_path == fq_path:
seen.add(r1_path)
fastq_paths.insert(0, r1_path)
if sample_id in samples:
logging.warn("Duplicate sample %s was found after renaming; skipping..." % sample_id)
continue
samples[sample_id] = {'path': fastq_paths }
return samples
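# Illustrative sketch (not part of the original script): the structure returned by
# get_sample_files(). The sample names and paths below are hypothetical.
def _example_samples():
    return OrderedDict([
        ('sampleA', {'path': ['/data/sampleA_R1.fastq.gz',
                              '/data/sampleA_R2.fastq.gz']}),
        ('sampleB', {'path': ['/data/sampleB_R1.fastq.gz',
                              '/data/sampleB_R2.fastq.gz']}),
    ])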
def create_metadata_template(outfile, samples):
with open(outfile, "w") as f:
print("#SampleID\tAlias", file=f)
for sample in samples:
print("%s\t%s" % (sample,sample), file=f)
@click.command()
@click.option('--project', prompt="Give your project a unique name", required=True, help='Give your project a nice name')
@click.option('--config', default="config.yaml", show_default=True, help='File to write the configuration to')
@click.option('--remote', help='Specify a ENA project to use as remote data (for example PRJEB14409')
@click.option('--path', default="../data", show_default=True, help='path to data folder')
@click.option('--rename', required=False, help='provide a file for renaming samples')
@click.option('--forward_primer', prompt="Which forward primer did you use?", required=True, default="CCTACGGGNGGCWGCAG", help="Which forward primer did you use?")
@click.option('--reverse_primer', prompt="Which reverse primer did you use?", required=True, default="GACTACHVGGGTATCTAATCC", help="Which reverse primer did you use?")
@click.option('--mergepairs', prompt="Choose wich method to use for stitching paired reads (vsearch, pandaseq)", required=True, default="vsearch", type=click.Choice(['pandaseq', 'vsearch', 'none']), help="Choose wich method to use for stitching paired reads")
@click.option('--classification', prompt="Choose wich classification option you want to use (sina, stampa, rdp, blast)", required=True, type=click.Choice(['sina', 'stampa', 'rdp', 'blast']), help="Choose wich classification option you want to use")
@click.option('--reference_db', prompt="Choose wich reference database to use (silva, unite)", required=True, type=click.Choice(['silva', 'unite']), help="Choose wich reference database to use")
@click.option('--clustering', prompt="Choose wich clustering method you want to use (usearch_smallmem, swarm)", required=True, default="usearch_smallmem", type=click.Choice(['usearch_smallmem', 'swarm']), help="Choose wich clustering method you want to use")
def make_config(project,config,path,remote, rename, forward_primer, reverse_primer, mergepairs, classification, reference_db, clustering):
"""Write the file `config` and complete the sample names and paths for all files in `path`."""
represent_dict_order = lambda self, data: self.represent_mapping('tag:yaml.org,2002:map', data.items())
yaml.add_representer(OrderedDict, represent_dict_order)
path = os.path.realpath(path)
conf = OrderedDict()
samples = get_sample_files(path, remote)
if rename:
renamed = 0
for line in open(rename):
sample, newname = line.split()
if sample in samples:
newname = newname.replace("_","-")
samples[newname] = samples.pop(sample)
renamed += 1
create_metadata_template("metadata.txt", samples.keys())
logging.info("Found %d samples under %s" % (len(samples), path if remote == None else "remote project %s " % remote))
if rename:
logging.info("Renamed %d samples" % renamed)
conf["project"] = project
conf["minsize"] = 2
conf["adapters_fasta"] = "/data/ngs/adapters/contaminant_list.txt"
conf["pandaseq_overlap"] = "10"
conf["pandaseq_quality"] = "25"
conf["pandaseq_minlength"] = "100"
conf["pandaseq_maxlength"] = "700"
conf["quality_control"] = OrderedDict()
conf["quality_control"]["barcode"] = OrderedDict()
conf["quality_control"]["barcode"]["threshold"] = 5
conf["quality_control"]["barcode"]["length"] = 8
conf["quality_control"]["barcode"]["seperator"] = "#"
conf["quality_control"]["trimming"] = OrderedDict()
conf["quality_control"]["trimming"]["quality"] = 25
conf["forward_primer"] = forward_primer
conf["reverse_primer"] = reverse_primer
conf["mergepairs"] = mergepairs
conf["vsearch_minmergelen"] = "200"
conf["metadata"] = "metadata.txt"
if remote != None:
conf["remote"] = True
else:
conf["remote"] = False
conf["barcode_in_header"] = False
conf["its"] = False
conf["its_region"] = "ITS2"
conf["clustering"] = clustering
conf["classification"] = classification
conf["use_full_lineage"] = False
conf["rdp_confidence_cutoff"] = 0.80
conf["reference_db"] = reference_db
conf["convert_to_casava1.8"] = False
conf["data"] = samples
with open(config, "w") as f:
print(yaml.dump(conf, default_flow_style=False), file=f)
logging.info("Configuration file written to %s" % config)
if __name__ == "__main__":
make_config()
| mit | 9,045,070,109,480,767,000 | 43.628571 | 259 | 0.61831 | false |
jromang/retina | gui/workspace.py | 1 | 1423 | # Copyright (C) 2013-2016 Jean-Francois Romang ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#TODO : inherit from 'workspaceobject' - abstract hide/show method
# ou "add"/"move" pethod avec des qwidgets
max_workspaces=4
workspace_objects= set()
current = 0
buttons = []
def add(qwidget, workspace_id=None):
workspace_objects.add(qwidget)
qwidget.workspace=current if workspace_id is None else workspace_id
#print("qwidget workspace:"+str(qwidget.workspace))
def switch(workspace_id):
#print("switch to:"+str(workspace_id))
global current
current=workspace_id
for widget in workspace_objects:
if widget.workspace==workspace_id: widget.show()
else: widget.hide()
for button in buttons:
button.setChecked(False)
buttons[workspace_id].setChecked(True)
| gpl-3.0 | -2,353,139,725,358,365,700 | 33.707317 | 71 | 0.735067 | false |
Hao-Liu/avocado | selftests/unit/test_xunit.py | 1 | 1945 | import argparse
import unittest
import os
import sys
from xml.dom import minidom
import tempfile
import shutil
from avocado import Test
from avocado.core.plugins import xunit
from avocado.core import job
class ParseXMLError(Exception):
pass
class _Stream(object):
def start_file_logging(self, param1, param2):
pass
def stop_file_logging(self):
pass
def set_tests_info(self, info):
pass
def notify(self, event, msg):
pass
def add_test(self, state):
pass
def set_test_status(self, status, state):
pass
class xUnitSucceedTest(unittest.TestCase):
def setUp(self):
class SimpleTest(Test):
def test(self):
pass
self.tmpfile = tempfile.mkstemp()
self.tmpdir = tempfile.mkdtemp()
args = argparse.Namespace()
args.xunit_output = self.tmpfile[1]
self.test_result = xunit.xUnitTestResult(stream=_Stream(), args=args)
self.test_result.start_tests()
self.test1 = SimpleTest(job=job.Job(), base_logdir=self.tmpdir)
self.test1.status = 'PASS'
self.test1.time_elapsed = 1.23
def tearDown(self):
os.close(self.tmpfile[0])
os.remove(self.tmpfile[1])
shutil.rmtree(self.tmpdir)
def testAddSuccess(self):
self.test_result.start_test(self.test1)
self.test_result.end_test(self.test1.get_state())
self.test_result.end_tests()
self.assertTrue(self.test_result.xml)
with open(self.test_result.output) as fp:
xml = fp.read()
try:
dom = minidom.parseString(xml)
except Exception, details:
raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" % (details, xml))
self.assertTrue(dom)
els = dom.getElementsByTagName('testcase')
self.assertEqual(len(els), 1)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -2,193,562,886,374,426,000 | 23.3125 | 95 | 0.62108 | false |
reneetrei/agile-bayou-76491 | snipts/urls.py | 1 | 2076 | from django.conf.urls import *
from snipts import views
urlpatterns = \
patterns('',
url(r'^s/(?P<snipt_key>[^/]+)/(?P<lexer>[^\?]+)?$',
views.redirect_snipt, name='redirect-snipt'),
url(r'^(?P<username>[^/]+)/feed/$',
views.redirect_user_feed,
name='redirect-feed'),
url(r'^public/tag/(?P<tag_slug>[^/]+)/feed/$',
views.redirect_public_tag_feed,
name='redirect-public-tag-feed'),
url(r'^(?P<username>[^/]+)/tag/(?P<tag_slug>[^/]+)/feed/$',
views.redirect_user_tag_feed,
name='redirect-user-tag-feed'),
url(r'^public/$',
views.list_public,
name='list-public'),
url(r'^public/tag/(?P<tag_slug>[^/]+)/$',
views.list_public,
name='list-public-tag'),
url(r'^download/(?P<snipt_key>[^/]+).*$',
views.download,
name='download'),
url(r'^embed/(?P<snipt_key>[^/]+)/$',
views.embed,
name='embed'),
url(r'^raw/(?P<snipt_key>[^/]+)/(?P<lexer>[^\?]+)?$',
views.raw,
name='raw'),
url(r'^report-spam/(?P<snipt_id>[^/]+)/$',
views.report_spam,
name='report-spam'),
url(r'^(?P<username_or_custom_slug>[^/]+)/$',
views.list_user,
name='list-user'),
url(r'^(?P<username_or_custom_slug>[^/]+)/tag/(?P<tag_slug>[^/]+)/$',
views.list_user,
name='list-user-tag'),
url(r'^(?P<username>[^/]+)/favorites/$',
views.favorites,
name='favorites'),
url(r'^(?P<username>[^/]+)/blog-posts/$',
views.blog_posts,
name='blog-posts'),
url(r'^(?P<username>[^/]+)/(?P<snipt_slug>[^/]+)/$',
views.detail,
name='detail'))
| mit | -6,723,413,863,394,528,000 | 40.52 | 82 | 0.406069 | false |
frac/celery | celery/signals.py | 1 | 6128 | """
==============
celery.signals
==============
Signals allow decoupled applications to receive notifications when
certain actions occur elsewhere in the application.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
.. contents::
:local:
.. _signal-basics:
Basics
======
Several kinds of events trigger signals; you can connect to these signals
to perform actions as they trigger.
Example connecting to the :signal:`task_sent` signal:
.. code-block:: python
from celery.signals import task_sent
def task_sent_handler(sender=None, task_id=None, task=None, args=None,
kwargs=None, **kwds):
print("Got signal task_sent for task id %s" % (task_id, ))
task_sent.connect(task_sent_handler)
Some signals also have a sender which you can filter by. For example the
:signal:`task_sent` signal uses the task name as a sender, so you can
connect your handler to be called only when tasks with the name `"tasks.add"`
have been sent by providing the `sender` argument to
:class:`~celery.utils.dispatch.signal.Signal.connect`:
.. code-block:: python
task_sent.connect(task_sent_handler, sender="tasks.add")
.. _signal-ref:
Signals
=======
Task Signals
------------
.. signal:: task_sent
task_sent
~~~~~~~~~
Dispatched when a task has been sent to the broker.
Note that this is executed in the client process, the one sending
the task, not in the worker.
Sender is the name of the task being sent.
Provides arguments:
* task_id
Id of the task to be executed.
* task
The task being executed.
* args
the tasks positional arguments.
* kwargs
The tasks keyword arguments.
* eta
The time to execute the task.
* taskset
Id of the taskset this task is part of (if any).
.. signal:: task_prerun
task_prerun
~~~~~~~~~~~
Dispatched before a task is executed.
Sender is the task class being executed.
Provides arguments:
* task_id
Id of the task to be executed.
* task
The task being executed.
* args
the tasks positional arguments.
* kwargs
The tasks keyword arguments.
.. signal:: task_postrun
task_postrun
~~~~~~~~~~~~
Dispatched after a task has been executed.
Sender is the task class executed.
Provides arguments:
* task_id
Id of the task to be executed.
* task
The task being executed.
* args
The tasks positional arguments.
* kwargs
The tasks keyword arguments.
* retval
The return value of the task.
.. signal:: task_failure
task_failure
~~~~~~~~~~~~
Dispatched when a task fails.
Sender is the task class executed.
Provides arguments:
* task_id
Id of the task.
* exception
Exception instance raised.
* args
Positional arguments the task was called with.
* kwargs
Keyword arguments the task was called with.
* traceback
Stack trace object.
* einfo
The :class:`celery.datastructures.ExceptionInfo` instance.
Worker Signals
--------------
.. signal:: worker_init
worker_init
~~~~~~~~~~~
Dispatched before the worker is started.
.. signal:: worker_ready
worker_ready
~~~~~~~~~~~~
Dispatched when the worker is ready to accept work.
.. signal:: worker_process_init
worker_process_init
~~~~~~~~~~~~~~~~~~~
Dispatched by each new pool worker process when it starts.
.. signal:: worker_shutdown
worker_shutdown
~~~~~~~~~~~~~~~
Dispatched when the worker is about to shut down.
Celerybeat Signals
------------------
.. signal:: beat_init
beat_init
~~~~~~~~~
Dispatched when celerybeat starts (either standalone or embedded).
Sender is the :class:`celery.beat.Service` instance.
.. signal:: beat_embedded_init
beat_embedded_init
~~~~~~~~~~~~~~~~~~
Dispatched in addition to the :signal:`beat_init` signal when celerybeat is
started as an embedded process. Sender is the
:class:`celery.beat.Service` instance.
Eventlet Signals
----------------
.. signal:: eventlet_pool_started
eventlet_pool_started
~~~~~~~~~~~~~~~~~~~~~
Sent when the eventlet pool has been started.
Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
.. signal:: eventlet_pool_preshutdown
eventlet_pool_preshutdown
~~~~~~~~~~~~~~~~~~~~~~~~~
Sent when the worker shutdown, just before the eventlet pool
is requested to wait for remaining workers.
Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
.. signal:: eventlet_pool_postshutdown
eventlet_pool_postshutdown
~~~~~~~~~~~~~~~~~~~~~~~~~~
Sent when the pool has been joined and the worker is ready to shutdown.
Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
.. signal:: eventlet_pool_apply
eventlet_pool_apply
~~~~~~~~~~~~~~~~~~~
Sent whenever a task is applied to the pool.
Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
Provides arguments:
* target
The target function.
* args
Positional arguments.
* kwargs
Keyword arguments.
"""
from celery.utils.dispatch import Signal
task_sent = Signal(providing_args=["task_id", "task",
"args", "kwargs",
"eta", "taskset"])
task_prerun = Signal(providing_args=["task_id", "task",
"args", "kwargs"])
task_postrun = Signal(providing_args=["task_id", "task",
"args", "kwargs", "retval"])
task_failure = Signal(providing_args=["task_id", "exception",
"args", "kwargs", "traceback",
"einfo"])
worker_init = Signal(providing_args=[])
worker_process_init = Signal(providing_args=[])
worker_ready = Signal(providing_args=[])
worker_shutdown = Signal(providing_args=[])
setup_logging = Signal(providing_args=["loglevel", "logfile",
"format", "colorize"])
beat_init = Signal(providing_args=[])
beat_embedded_init = Signal(providing_args=[])
eventlet_pool_started = Signal(providing_args=[])
eventlet_pool_preshutdown = Signal(providing_args=[])
eventlet_pool_postshutdown = Signal(providing_args=[])
eventlet_pool_apply = Signal(providing_args=["target", "args", "kwargs"])
| bsd-3-clause | -1,422,268,699,349,315,800 | 19.426667 | 75 | 0.6578 | false |
fullmetalfelix/ML-CSC-tutorial | data/descriptor_codes/charge.mbtr.py | 1 | 2490 | from __future__ import print_function
from describe.descriptors import LMBTR
from describe.core import System
from describe.data.element_data import numbers_to_symbols
import numpy as np
from scipy.sparse import lil_matrix, save_npz
from read_binary import *
data = read_b('../binary/database-mulliken-ccsd-spd.bin')
decay_factor = 0.5
mbtr = LMBTR(
atom_index = 1,
atomic_numbers=[1, 6, 7, 8, 9],
k=[1, 2, 3],
periodic=False,
grid={
"k1": {
"min": 0,
"max": 10,
"sigma": 0.1,
"n": 11,
},
"k2": {
"min": 1/7,
"max": 1.5,
"sigma": 0.01,
"n": 50,
},
"k3": {
"min": -1.0,
"max": 1.0,
"sigma": 0.05,
"n": 50,
}
},
weighting={
"k2": {
"function": lambda x: np.exp(-decay_factor*x),
"threshold": 1e-3
},
"k3": {
"function": lambda x: np.exp(-decay_factor*x),
"threshold": 1e-3
},
},
flatten=True)
mbtr_nfeat = mbtr.get_number_of_features()
elements_list = [1, 6, 7, 8, 9]
chg = np.empty(len(elements_list), dtype='object')
max_chg_count = [10000]*4+[3314]
for i, j in enumerate(max_chg_count):
chg[i] = lil_matrix((j, mbtr_nfeat))
chg_count = np.zeros(len(elements_list), dtype='int')
for atom_ind, atoms in enumerate(data):
atoms_sys = System(positions=atoms.coords, numbers=atoms.Zs)
elements_req = np.array(elements_list)[chg_count != max_chg_count].tolist()
print('\r {}'.format(chg_count), end = '')
for element in elements_req:
element_ind = elements_list.index(element)
if chg_count[element_ind] != max_chg_count[element_ind] and element in atoms.Zs:
element_indx_atoms = np.where(atoms.Zs == element)[0]
len_added = min(element_indx_atoms.shape[0], max_chg_count[element_ind]-chg_count[element_ind])
for i in range(chg_count[element_ind], chg_count[element_ind]+len_added):
mbtr.atom_index = element_indx_atoms[i - chg_count[element_ind]]
chg[element_ind][i] = mbtr.create(atoms_sys)
chg_count[element_ind] += len_added
if np.sum(chg_count) == sum(max_chg_count):
break
for i, j in enumerate(elements_list):
save_npz('../charge.{}.input.mbtr'.format(j), chg[i].tocsr())
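# Illustrative sketch (not part of the original script): the per-element descriptor
# matrices saved above can be reloaded later for training. The path assumes scipy's
# save_npz appended the '.npz' suffix to the file names used above.
def _example_load_descriptors(element=1):
    from scipy.sparse import load_npz
    return load_npz('../charge.{}.input.mbtr.npz'.format(element))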
| gpl-3.0 | 3,026,851,773,269,828,600 | 29.740741 | 107 | 0.538554 | false |
ideal/drummer | drummer/common.py | 1 | 1045 | #
# Copyright (C) 2016 Shang Yuanchun <[email protected]>
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# drummer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with drummer. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
#
"""Common functions for Drummer :("""
import pkg_resources
def get_version():
"""
Returns the version of drummer from the python egg metadata
:returns: the version of drummer
"""
try:
return pkg_resources.require("drummer")[0].version
except:
return "dev"
| gpl-3.0 | -9,059,040,297,306,448,000 | 26.5 | 67 | 0.712919 | false |
arnau-prat/My-Raspersonal-assistant | assets/brain.py | 1 | 1572 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import threading
import time
import subprocess
import os
import signal
from datetime import datetime
from modules import tracker, calendar, alarm, wolfram, music
class Brain:
def __init__(self):
self.tracker = tracker.Tracker()
self.alarm = alarm.Alarm()
self.calendar = calendar.Calendar()
self.wolfram = wolfram.Wolfram()
self.music = music.Music()
def think(self, text):
if ("timer" in text) | ("alarm" in text):
response = self.alarm.think(text)
elif ("time" in text):
response = datetime.now().strftime("It's %I:%M%p")
elif ("day" in text) | ("date" in text):
response = datetime.now().strftime("%A %d of %B")
elif ("music" in text) | ("play" in text):
response = self.music.play()
elif ("take" in text) | ("photo" in text):
response = "taking picture"
image = cv2.imread("/home/pi/Desktop/im.jpg")
image = cv2.resize(image,(800,600))
cv2.imwrite("/hoe/pi/Desktop/def.jpg",image)
time.sleep(1)
os.system ('mpg321 assets/camera_shutter.mp3')
elif ("wake" in text) | ("up" in text):
self.tracker.start()
response = "I'm waking up sir"
elif ("down" in text) | ("sleep" in text):
self.tracker.stop()
response = "I'm going to sleep now"
elif "calendar" in text:
response = self.calendar.think(text)
else:
response = self.wolfram.think(text)
return response
| mit | -7,316,954,540,866,034,000 | 25.2 | 62 | 0.583333 | false |
leosartaj/autosign | tests/test_removeSign.py | 1 | 1228 | #!/usr/bin/env python2
##
# autosign
# https://github.com/leosartaj/autosign.git
#
# copyright (c) 2014 sartaj singh
# licensed under the mit license.
##
import unittest
import os, shutil
import helper
from autosign.main import removeSign, isSign
from autosign.exce import UnsignedError
class TestremoveSign(unittest.TestCase):
"""
tests the removeSign function in main module
"""
def setUp(self):
self.dire = os.path.dirname(__file__)
self.signedfile = os.path.join(self.dire, 'testData/toBeSigned.py')
self.signed = os.path.join(self.dire, 'testData/test_signedfile.py')
shutil.copyfile(self.signedfile, self.signed)
self.unsigned = os.path.join(self.dire, 'testData/test_unsignedfile.py')
helper.newFile(self.unsigned)
helper.readrc(self)
def test_remove_from_unsigned_file(self):
self.assertRaises(UnsignedError, removeSign, self.unsigned, self.options_py)
def test_remove_from_signed_file(self):
self.assertTrue(isSign(self.signed, self.options_py))
removeSign(self.signed, self.options_py)
self.assertFalse(isSign(self.signed, self.options_py))
def tearDown(self):
os.remove(self.unsigned)
| mit | 1,194,473,589,334,884,000 | 29.7 | 84 | 0.69544 | false |
tomadasocial/tomada-social | tomadasocial/settings.py | 1 | 3965 | """
Django settings for tomadasocial project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!qy$6$mh%b2mp$)km*!^uaf-v%givqnzzndo0b)y)qo93p973_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_mongoengine',
'account',
'evento',
'conta',
'event',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tomadasocial.urls'
WSGI_APPLICATION = 'tomadasocial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy'
}
}
AUTHENTICATION_BACKENDS = (
'mongoengine.django.auth.MongoEngineBackend',
)
SESSION_ENGINE = 'mongoengine.django.sessions'
SESSION_SERIALIZER = 'mongoengine.django.sessions.BSONSerializer'
# from mongoengine import connect
# connect('records', username='recordsUserAdmin', password='password')
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
from mongoengine import connect
# connect('admin', username='admin', password='senha')
#connect("cc_bQWsNhAJvOH", host='mongodb://bQWsNhAJvOHi:[email protected]:31904/cc_bQWsNhAJvOHi')
connect("heroku_app33947277", host='mongodb://tomadasocial:[email protected]:61681/heroku_app33947277')
#connect("admin", host='mongodb://admin:[email protected]:27017/admin') | gpl-2.0 | -515,293,318,295,498,900 | 28.819549 | 123 | 0.735183 | false |
zostera/django-bootstrap4 | tests/test_settings.py | 1 | 2426 | from django.test import TestCase, override_settings
from bootstrap4.bootstrap import get_bootstrap_setting, include_jquery, jquery_slim_url, jquery_url
class SettingsTest(TestCase):
def test_get_bootstrap_setting(self):
self.assertIsNone(get_bootstrap_setting("SETTING_DOES_NOT_EXIST"))
self.assertEqual("not none", get_bootstrap_setting("SETTING_DOES_NOT_EXIST", "not none"))
# Override a setting
with self.settings(BOOTSTRAP4={"SETTING_DOES_NOT_EXIST": "exists now"}):
self.assertEqual(get_bootstrap_setting("SETTING_DOES_NOT_EXIST"), "exists now")
def test_jquery_url(self):
self.assertEqual(
jquery_url(),
{
"url": "https://code.jquery.com/jquery-3.5.1.min.js",
"integrity": "sha384-ZvpUoO/+PpLXR1lu4jmpXWu80pZlYUAfxl5NsBMWOEPSjUn/6Z/hRTt8+pR6L4N2",
"crossorigin": "anonymous",
},
)
@override_settings(
BOOTSTRAP4={
"jquery_url": {
"url": "https://example.com/jquery.js",
"integrity": "we-want-a-different-jquery",
"crossorigin": "anonymous",
},
}
)
def test_jquery_url_from_settings(self):
self.assertEqual(
jquery_url(),
{
"url": "https://example.com/jquery.js",
"integrity": "we-want-a-different-jquery",
"crossorigin": "anonymous",
},
)
def test_jquery_slim_url(self):
self.assertEqual(
jquery_slim_url(),
{
"url": "https://code.jquery.com/jquery-3.5.1.slim.min.js",
"integrity": "sha384-DfXdz2htPH0lsSSs5nCTpuj/zy4C+OGpamoFVy38MVBnE+IbbVYUew+OrCXaRkfj",
"crossorigin": "anonymous",
},
)
def test_include_jquery(self):
self.assertEqual(include_jquery(), False)
with self.settings(BOOTSTRAP4={"include_jquery": False}):
self.assertEqual(include_jquery(), False)
with self.settings(BOOTSTRAP4={"include_jquery": True}):
self.assertEqual(include_jquery(), True)
with self.settings(BOOTSTRAP4={"include_jquery": "full"}):
self.assertEqual(include_jquery(), "full")
with self.settings(BOOTSTRAP4={"include_jquery": "slim"}):
self.assertEqual(include_jquery(), "slim")
| bsd-3-clause | 1,537,482,120,237,771,500 | 38.129032 | 103 | 0.576257 | false |
tmaiwald/OSIM | OSIM/Optimizations/OptimizationComponents/Optimizable.py | 1 | 1217 |
class Optimizable(object):
def __init__(self,comp_names_list,paramname,valfrom,valto,**kwargs):
self.names = comp_names_list
self.paramname = paramname
self.minStep = 2 #default
self.vFrom = valfrom
self.vTo = valto
self.val = 0
for name, value in kwargs.items():
if name == 'minSteps':
self.minStep = value
def setValue(self, v):
self.val = v
def getRangeBegin(self):
return self.vFrom
def getRangeEnd(self):
return self.vTo
def getValue(self):
return self.val
def getOptimizableComponentNames(self):
return self.names
def toString(self):
stri = ""
for n in self.names:
stri = stri+" "+n
return (stri+" at %s"%(str(self.val)))
def getParamName(self):
return self.paramname
@staticmethod
def getSetableList(olist):
setableList = list()
for o in olist:
for n in o.getOptimizableComponentNames():
"""compname, paramname, paramval"""
n = [n, o.getParamName(), o.getValue()]
setableList.append(n)
return setableList
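
# Usage sketch (illustrative only; the component names, parameter name and
# values below are hypothetical, not taken from the project):
#
#     opt = Optimizable(["R1", "R2"], "resistance", 1e3, 1e5, minSteps=10)
#     opt.setValue(4.7e3)
#     Optimizable.getSetableList([opt])
#     # -> [["R1", "resistance", 4700.0], ["R2", "resistance", 4700.0]]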
| bsd-2-clause | 6,869,095,868,445,226,000 | 22.862745 | 72 | 0.557108 | false |
pymedusa/SickRage | medusa/init/logconfig.py | 1 | 2929 | # coding=utf-8
"""Monkey-patch logger functions to accept enhanced format styles."""
from __future__ import unicode_literals
import logging
from builtins import object
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
from six import text_type
class StyleAdapter(logging.LoggerAdapter):
"""Logger Adapter with new string format style."""
adapter_members = {attr: attr for attr in dir(logging.LoggerAdapter) if not callable(attr) and
not attr.startswith('__')}
adapter_members.update({'warn': 'warning', 'fatal': 'critical'})
reserved_keywords = getfullargspec(logging.Logger._log).args[1:]
def __init__(self, target_logger, extra=None):
"""Constructor.
:param target_logger:
:type target_logger: logging.Logger
:param extra:
:type extra: dict
"""
super(StyleAdapter, self).__init__(target_logger, extra)
def __getattr__(self, name):
"""Wrap to the actual logger.
:param name:
:type name: str
:return:
"""
if name not in self.adapter_members:
return getattr(self.logger, name)
return getattr(self, self.adapter_members[name])
def __setattr__(self, key, value):
"""Wrap to the actual logger.
:param key:
:type key: str
:param value:
"""
self.__dict__[key] = value
def process(self, msg, kwargs):
"""Enhance default process to use BraceMessage and remove unsupported keyword args for the actual logger method.
:param msg:
:param kwargs:
:return:
"""
reserved = {k: kwargs[k] for k in self.reserved_keywords if k in kwargs}
kwargs = {k: kwargs[k] for k in kwargs if k not in self.reserved_keywords}
return BraceMessage(msg, (), kwargs), reserved
class BraceMessage(object):
"""Log Message wrapper that applies new string format style."""
def __init__(self, fmt, args, kwargs):
"""Constructor.
:param fmt:
:type fmt: logging.Formatter
:param args:
:param kwargs:
"""
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self):
"""Represent a string.
:return:
:rtype: str
"""
result = text_type(self.fmt)
return result.format(*self.args, **self.kwargs) if self.args or self.kwargs else result
def initialize():
"""Replace standard getLogger with our enhanced one."""
def enhanced_get_logger(name=None):
"""Enhanced logging.getLogger function.
:param name:
:return:
"""
return StyleAdapter(standard_logger(name))
logging.getLogger = enhanced_get_logger
# Keeps the standard logging.getLogger to be used by StyleAdapter
standard_logger = logging.getLogger
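
def _usage_example():
    """Illustrative sketch only, not part of the original module: after
    initialize() has been called, loggers obtained via logging.getLogger
    accept str.format-style placeholders. The logger name and message
    contents below are made up.
    """
    initialize()
    log = logging.getLogger('medusa.example')
    # BraceMessage applies str.format lazily when the record is emitted
    log.info('Found {count} episodes for {show}', count=3, show='Example Show')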
| gpl-3.0 | 4,170,883,745,947,611,000 | 26.632075 | 120 | 0.613861 | false |
zagl/led-tool | ui_main.py | 1 | 27811 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_main.ui'
#
# Created: Sun Apr 3 16:50:44 2016
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(687, 562)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.main_stacked_widget = QtGui.QStackedWidget(self.centralwidget)
self.main_stacked_widget.setObjectName(_fromUtf8("main_stacked_widget"))
self.main_page = QtGui.QWidget()
self.main_page.setMinimumSize(QtCore.QSize(669, 544))
self.main_page.setObjectName(_fromUtf8("main_page"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.main_page)
self.horizontalLayout_2.setSpacing(6)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setContentsMargins(-1, -1, 0, -1)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.led_filter = QtGui.QLineEdit(self.main_page)
self.led_filter.setObjectName(_fromUtf8("led_filter"))
self.verticalLayout_3.addWidget(self.led_filter)
self.led_list_view = QtGui.QListView(self.main_page)
self.led_list_view.setIconSize(QtCore.QSize(0, 0))
self.led_list_view.setGridSize(QtCore.QSize(0, 0))
self.led_list_view.setObjectName(_fromUtf8("led_list_view"))
self.verticalLayout_3.addWidget(self.led_list_view)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.add_led_button = QtGui.QToolButton(self.main_page)
self.add_led_button.setMinimumSize(QtCore.QSize(32, 32))
self.add_led_button.setAutoRaise(False)
self.add_led_button.setObjectName(_fromUtf8("add_led_button"))
self.horizontalLayout_5.addWidget(self.add_led_button)
self.remove_led_button = QtGui.QToolButton(self.main_page)
self.remove_led_button.setMinimumSize(QtCore.QSize(32, 32))
self.remove_led_button.setAutoRaise(False)
self.remove_led_button.setObjectName(_fromUtf8("remove_led_button"))
self.horizontalLayout_5.addWidget(self.remove_led_button)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.horizontalLayout_2.addLayout(self.verticalLayout_3)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.gridLayout.setContentsMargins(14, -1, -1, -1)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.voltage_label = QtGui.QLabel(self.main_page)
self.voltage_label.setMinimumSize(QtCore.QSize(0, 0))
self.voltage_label.setObjectName(_fromUtf8("voltage_label"))
self.gridLayout.addWidget(self.voltage_label, 6, 1, 1, 1)
self.label = QtGui.QLabel(self.main_page)
self.label.setMinimumSize(QtCore.QSize(0, 30))
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 2, 0, 1, 1)
self.current_spinbox = QtGui.QDoubleSpinBox(self.main_page)
self.current_spinbox.setMaximum(9999999.0)
self.current_spinbox.setObjectName(_fromUtf8("current_spinbox"))
self.gridLayout.addWidget(self.current_spinbox, 2, 1, 1, 1)
self.label_8 = QtGui.QLabel(self.main_page)
self.label_8.setMinimumSize(QtCore.QSize(0, 30))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout.addWidget(self.label_8, 8, 0, 1, 1)
self.label_10 = QtGui.QLabel(self.main_page)
self.label_10.setMinimumSize(QtCore.QSize(0, 30))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout.addWidget(self.label_10, 11, 0, 1, 1)
self.input_power_label = QtGui.QLabel(self.main_page)
self.input_power_label.setObjectName(_fromUtf8("input_power_label"))
self.gridLayout.addWidget(self.input_power_label, 8, 1, 1, 1)
self.voltage_group_combobox = QtGui.QComboBox(self.main_page)
self.voltage_group_combobox.setMinimumSize(QtCore.QSize(0, 0))
self.voltage_group_combobox.setObjectName(_fromUtf8("voltage_group_combobox"))
self.gridLayout.addWidget(self.voltage_group_combobox, 5, 1, 1, 1)
self.label_9 = QtGui.QLabel(self.main_page)
self.label_9.setMinimumSize(QtCore.QSize(0, 30))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout.addWidget(self.label_9, 10, 0, 1, 1)
self.thermal_power_label = QtGui.QLabel(self.main_page)
self.thermal_power_label.setObjectName(_fromUtf8("thermal_power_label"))
self.gridLayout.addWidget(self.thermal_power_label, 10, 1, 1, 1)
self.label_5 = QtGui.QLabel(self.main_page)
self.label_5.setMinimumSize(QtCore.QSize(0, 30))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 7, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.main_page)
self.label_4.setMinimumSize(QtCore.QSize(0, 30))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 5, 0, 1, 1)
self.radiant_flux_label = QtGui.QLabel(self.main_page)
self.radiant_flux_label.setObjectName(_fromUtf8("radiant_flux_label"))
self.gridLayout.addWidget(self.radiant_flux_label, 9, 1, 1, 1)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.led_header_label = QtGui.QLabel(self.main_page)
self.led_header_label.setMinimumSize(QtCore.QSize(0, 32))
self.led_header_label.setObjectName(_fromUtf8("led_header_label"))
self.horizontalLayout_6.addWidget(self.led_header_label)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem1)
self.edit_led_button = QtGui.QPushButton(self.main_page)
self.edit_led_button.setMaximumSize(QtCore.QSize(75, 16777215))
self.edit_led_button.setFlat(False)
self.edit_led_button.setObjectName(_fromUtf8("edit_led_button"))
self.horizontalLayout_6.addWidget(self.edit_led_button)
self.gridLayout.addLayout(self.horizontalLayout_6, 1, 0, 1, 2)
self.label_2 = QtGui.QLabel(self.main_page)
self.label_2.setMinimumSize(QtCore.QSize(0, 30))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 3, 0, 1, 1)
self.luminous_flux_label = QtGui.QLabel(self.main_page)
self.luminous_flux_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.luminous_flux_label.setObjectName(_fromUtf8("luminous_flux_label"))
self.gridLayout.addWidget(self.luminous_flux_label, 7, 1, 1, 1)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem2, 12, 0, 1, 1)
self.brightness_group_combobox = QtGui.QComboBox(self.main_page)
self.brightness_group_combobox.setObjectName(_fromUtf8("brightness_group_combobox"))
self.gridLayout.addWidget(self.brightness_group_combobox, 4, 1, 1, 1)
self.label_7 = QtGui.QLabel(self.main_page)
self.label_7.setMinimumSize(QtCore.QSize(0, 30))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout.addWidget(self.label_7, 9, 0, 1, 1)
self.label_3 = QtGui.QLabel(self.main_page)
self.label_3.setMinimumSize(QtCore.QSize(0, 30))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 4, 0, 1, 1)
self.temperature_spinbox = QtGui.QDoubleSpinBox(self.main_page)
self.temperature_spinbox.setMaximum(9999999.0)
self.temperature_spinbox.setObjectName(_fromUtf8("temperature_spinbox"))
self.gridLayout.addWidget(self.temperature_spinbox, 3, 1, 1, 1)
self.label_6 = QtGui.QLabel(self.main_page)
self.label_6.setMinimumSize(QtCore.QSize(0, 30))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 6, 0, 1, 1)
self.radiant_efficiency_label = QtGui.QLabel(self.main_page)
self.radiant_efficiency_label.setObjectName(_fromUtf8("radiant_efficiency_label"))
self.gridLayout.addWidget(self.radiant_efficiency_label, 11, 1, 1, 1)
self.gridLayout.setColumnStretch(1, 1)
self.horizontalLayout_2.addLayout(self.gridLayout)
self.horizontalLayout_2.setStretch(1, 1)
self.main_stacked_widget.addWidget(self.main_page)
self.editor_page = QtGui.QWidget()
self.editor_page.setObjectName(_fromUtf8("editor_page"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.editor_page)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setSpacing(20)
self.horizontalLayout_4.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.led_editor_navigator_list = QtGui.QListWidget(self.editor_page)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.led_editor_navigator_list.sizePolicy().hasHeightForWidth())
self.led_editor_navigator_list.setSizePolicy(sizePolicy)
self.led_editor_navigator_list.setMinimumSize(QtCore.QSize(0, 0))
self.led_editor_navigator_list.setObjectName(_fromUtf8("led_editor_navigator_list"))
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
self.horizontalLayout_4.addWidget(self.led_editor_navigator_list)
self.editor_stacked_widget = QtGui.QStackedWidget(self.editor_page)
self.editor_stacked_widget.setObjectName(_fromUtf8("editor_stacked_widget"))
self.parameter_page = QtGui.QWidget()
self.parameter_page.setObjectName(_fromUtf8("parameter_page"))
self.gridLayout_3 = QtGui.QGridLayout(self.parameter_page)
self.gridLayout_3.setMargin(0)
self.gridLayout_3.setVerticalSpacing(6)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.manufacturer_combo = QtGui.QComboBox(self.parameter_page)
self.manufacturer_combo.setEditable(True)
self.manufacturer_combo.setObjectName(_fromUtf8("manufacturer_combo"))
self.manufacturer_combo.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.manufacturer_combo, 1, 2, 1, 1)
spacerItem3 = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem3, 7, 0, 1, 1)
self.label_20 = QtGui.QLabel(self.parameter_page)
self.label_20.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.gridLayout_3.addWidget(self.label_20, 3, 0, 1, 1)
self.label_18 = QtGui.QLabel(self.parameter_page)
self.label_18.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.gridLayout_3.addWidget(self.label_18, 1, 0, 1, 1)
self.label_19 = QtGui.QLabel(self.parameter_page)
self.label_19.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.gridLayout_3.addWidget(self.label_19, 2, 0, 1, 1)
self.label_21 = QtGui.QLabel(self.parameter_page)
self.label_21.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_21.setObjectName(_fromUtf8("label_21"))
self.gridLayout_3.addWidget(self.label_21, 4, 0, 1, 1)
self.name_edit = QtGui.QLineEdit(self.parameter_page)
self.name_edit.setObjectName(_fromUtf8("name_edit"))
self.gridLayout_3.addWidget(self.name_edit, 0, 2, 1, 1)
self.label_22 = QtGui.QLabel(self.parameter_page)
self.label_22.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_22.setObjectName(_fromUtf8("label_22"))
self.gridLayout_3.addWidget(self.label_22, 5, 0, 1, 1)
self.label_23 = QtGui.QLabel(self.parameter_page)
self.label_23.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_23.setObjectName(_fromUtf8("label_23"))
self.gridLayout_3.addWidget(self.label_23, 6, 0, 1, 1)
self.thermal_resistance_spin = QtGui.QDoubleSpinBox(self.parameter_page)
self.thermal_resistance_spin.setMaximum(9999999.0)
self.thermal_resistance_spin.setObjectName(_fromUtf8("thermal_resistance_spin"))
self.gridLayout_3.addWidget(self.thermal_resistance_spin, 5, 2, 1, 1)
self.label_17 = QtGui.QLabel(self.parameter_page)
self.label_17.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_3.addWidget(self.label_17, 0, 0, 1, 1)
self.reference_temperature_spin = QtGui.QDoubleSpinBox(self.parameter_page)
self.reference_temperature_spin.setMaximum(9999999.0)
self.reference_temperature_spin.setObjectName(_fromUtf8("reference_temperature_spin"))
self.gridLayout_3.addWidget(self.reference_temperature_spin, 6, 2, 1, 1)
self.typical_current_spin = QtGui.QDoubleSpinBox(self.parameter_page)
self.typical_current_spin.setMaximum(9999999.0)
self.typical_current_spin.setObjectName(_fromUtf8("typical_current_spin"))
self.gridLayout_3.addWidget(self.typical_current_spin, 4, 2, 1, 1)
self.typical_voltage_spin = QtGui.QDoubleSpinBox(self.parameter_page)
self.typical_voltage_spin.setMaximum(9999999.0)
self.typical_voltage_spin.setObjectName(_fromUtf8("typical_voltage_spin"))
self.gridLayout_3.addWidget(self.typical_voltage_spin, 3, 2, 1, 1)
self.family_combo = QtGui.QComboBox(self.parameter_page)
self.family_combo.setEditable(True)
self.family_combo.setObjectName(_fromUtf8("family_combo"))
self.family_combo.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.family_combo, 2, 2, 1, 1)
self.editor_stacked_widget.addWidget(self.parameter_page)
self.brightness_group_page = QtGui.QWidget()
self.brightness_group_page.setObjectName(_fromUtf8("brightness_group_page"))
self.verticalLayout = QtGui.QVBoxLayout(self.brightness_group_page)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.brightness_group_table_view = QtGui.QTableView(self.brightness_group_page)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.brightness_group_table_view.sizePolicy().hasHeightForWidth())
self.brightness_group_table_view.setSizePolicy(sizePolicy)
self.brightness_group_table_view.setObjectName(_fromUtf8("brightness_group_table_view"))
self.verticalLayout.addWidget(self.brightness_group_table_view)
self.editor_stacked_widget.addWidget(self.brightness_group_page)
self.voltage_group_page = QtGui.QWidget()
self.voltage_group_page.setObjectName(_fromUtf8("voltage_group_page"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.voltage_group_page)
self.horizontalLayout_3.setMargin(0)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.voltage_group_table_view = QtGui.QTableView(self.voltage_group_page)
self.voltage_group_table_view.setObjectName(_fromUtf8("voltage_group_table_view"))
self.voltage_group_table_view.verticalHeader().setCascadingSectionResizes(True)
self.voltage_group_table_view.verticalHeader().setDefaultSectionSize(0)
self.horizontalLayout_3.addWidget(self.voltage_group_table_view)
self.editor_stacked_widget.addWidget(self.voltage_group_page)
self.current_emission_page = QtGui.QWidget()
self.current_emission_page.setObjectName(_fromUtf8("current_emission_page"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.current_emission_page)
self.verticalLayout_5.setMargin(0)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.current_emission_table_view = QtGui.QTableView(self.current_emission_page)
self.current_emission_table_view.setObjectName(_fromUtf8("current_emission_table_view"))
self.verticalLayout_5.addWidget(self.current_emission_table_view)
self.editor_stacked_widget.addWidget(self.current_emission_page)
self.current_voltage_page = QtGui.QWidget()
self.current_voltage_page.setObjectName(_fromUtf8("current_voltage_page"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.current_voltage_page)
self.verticalLayout_6.setMargin(0)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.current_voltage_table_view = QtGui.QTableView(self.current_voltage_page)
self.current_voltage_table_view.setObjectName(_fromUtf8("current_voltage_table_view"))
self.verticalLayout_6.addWidget(self.current_voltage_table_view)
self.editor_stacked_widget.addWidget(self.current_voltage_page)
self.temperature_emission_page = QtGui.QWidget()
self.temperature_emission_page.setObjectName(_fromUtf8("temperature_emission_page"))
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.temperature_emission_page)
self.horizontalLayout_7.setMargin(0)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.temperature_emission_table_view = QtGui.QTableView(self.temperature_emission_page)
self.temperature_emission_table_view.setObjectName(_fromUtf8("temperature_emission_table_view"))
self.horizontalLayout_7.addWidget(self.temperature_emission_table_view)
self.editor_stacked_widget.addWidget(self.temperature_emission_page)
self.temperature_voltage_page = QtGui.QWidget()
self.temperature_voltage_page.setObjectName(_fromUtf8("temperature_voltage_page"))
self.horizontalLayout_8 = QtGui.QHBoxLayout(self.temperature_voltage_page)
self.horizontalLayout_8.setMargin(0)
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.temperature_voltage_table_view = QtGui.QTableView(self.temperature_voltage_page)
self.temperature_voltage_table_view.setObjectName(_fromUtf8("temperature_voltage_table_view"))
self.horizontalLayout_8.addWidget(self.temperature_voltage_table_view)
self.editor_stacked_widget.addWidget(self.temperature_voltage_page)
self.horizontalLayout_4.addWidget(self.editor_stacked_widget)
self.horizontalLayout_4.setStretch(1, 2)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.buttonBox = QtGui.QDialogButtonBox(self.editor_page)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout_2.addWidget(self.buttonBox)
self.main_stacked_widget.addWidget(self.editor_page)
self.horizontalLayout.addWidget(self.main_stacked_widget)
MainWindow.setCentralWidget(self.centralwidget)
self.voltage_label.setBuddy(self.voltage_label)
self.retranslateUi(MainWindow)
self.main_stacked_widget.setCurrentIndex(0)
self.editor_stacked_widget.setCurrentIndex(0)
QtCore.QObject.connect(self.led_editor_navigator_list, QtCore.SIGNAL(_fromUtf8("currentRowChanged(int)")), self.editor_stacked_widget.setCurrentIndex)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.name_edit, self.manufacturer_combo)
MainWindow.setTabOrder(self.manufacturer_combo, self.family_combo)
MainWindow.setTabOrder(self.family_combo, self.typical_voltage_spin)
MainWindow.setTabOrder(self.typical_voltage_spin, self.typical_current_spin)
MainWindow.setTabOrder(self.typical_current_spin, self.thermal_resistance_spin)
MainWindow.setTabOrder(self.thermal_resistance_spin, self.reference_temperature_spin)
MainWindow.setTabOrder(self.reference_temperature_spin, self.buttonBox)
MainWindow.setTabOrder(self.buttonBox, self.led_editor_navigator_list)
MainWindow.setTabOrder(self.led_editor_navigator_list, self.brightness_group_combobox)
MainWindow.setTabOrder(self.brightness_group_combobox, self.voltage_group_combobox)
MainWindow.setTabOrder(self.voltage_group_combobox, self.edit_led_button)
MainWindow.setTabOrder(self.edit_led_button, self.led_filter)
MainWindow.setTabOrder(self.led_filter, self.led_list_view)
MainWindow.setTabOrder(self.led_list_view, self.add_led_button)
MainWindow.setTabOrder(self.add_led_button, self.remove_led_button)
MainWindow.setTabOrder(self.remove_led_button, self.temperature_spinbox)
MainWindow.setTabOrder(self.temperature_spinbox, self.current_spinbox)
MainWindow.setTabOrder(self.current_spinbox, self.brightness_group_table_view)
MainWindow.setTabOrder(self.brightness_group_table_view, self.voltage_group_table_view)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "LED Tool", None))
self.led_filter.setPlaceholderText(_translate("MainWindow", "Filter", None))
self.add_led_button.setText(_translate("MainWindow", "+", None))
self.remove_led_button.setText(_translate("MainWindow", "−", None))
self.voltage_label.setText(_translate("MainWindow", "--- V", None))
self.label.setText(_translate("MainWindow", "Current:", None))
self.current_spinbox.setSuffix(_translate("MainWindow", " mA", None))
self.label_8.setText(_translate("MainWindow", "Input Power:", None))
self.label_10.setText(_translate("MainWindow", "Radiant Efficiency:", None))
self.input_power_label.setText(_translate("MainWindow", "--- W", None))
self.label_9.setText(_translate("MainWindow", "Thermal Power:", None))
self.thermal_power_label.setText(_translate("MainWindow", "--- W", None))
self.label_5.setText(_translate("MainWindow", "Luminous Flux:", None))
self.label_4.setText(_translate("MainWindow", "Voltage Group:", None))
self.radiant_flux_label.setText(_translate("MainWindow", "--- W", None))
self.led_header_label.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:12pt; font-weight:600;\">LUMILEDS LUXEON F ES</span></p></body></html>", None))
self.edit_led_button.setText(_translate("MainWindow", "Edit", None))
self.label_2.setText(_translate("MainWindow", "Temperature:", None))
self.luminous_flux_label.setText(_translate("MainWindow", "--- lm", None))
self.label_7.setText(_translate("MainWindow", "Radiant Flux:", None))
self.label_3.setText(_translate("MainWindow", "Brightness Group:", None))
self.temperature_spinbox.setSuffix(_translate("MainWindow", " °C", None))
self.label_6.setText(_translate("MainWindow", "Voltage:", None))
self.radiant_efficiency_label.setText(_translate("MainWindow", "--- %", None))
__sortingEnabled = self.led_editor_navigator_list.isSortingEnabled()
self.led_editor_navigator_list.setSortingEnabled(False)
item = self.led_editor_navigator_list.item(0)
item.setText(_translate("MainWindow", "Parameter", None))
item = self.led_editor_navigator_list.item(1)
item.setText(_translate("MainWindow", "Brightness Groups", None))
item = self.led_editor_navigator_list.item(2)
item.setText(_translate("MainWindow", "Voltage Groups", None))
item = self.led_editor_navigator_list.item(3)
item.setText(_translate("MainWindow", "Current vs. Emission", None))
item = self.led_editor_navigator_list.item(4)
item.setText(_translate("MainWindow", "Current vs. Voltage", None))
item = self.led_editor_navigator_list.item(5)
item.setText(_translate("MainWindow", "Temperature vs. Emission", None))
item = self.led_editor_navigator_list.item(6)
item.setText(_translate("MainWindow", "Temperature vs. Voltage", None))
self.led_editor_navigator_list.setSortingEnabled(__sortingEnabled)
self.manufacturer_combo.setItemText(0, _translate("MainWindow", "OSRAM", None))
self.label_20.setText(_translate("MainWindow", "Typical Voltage:", None))
self.label_18.setText(_translate("MainWindow", "Manufacturer:", None))
self.label_19.setText(_translate("MainWindow", "Family:", None))
self.label_21.setText(_translate("MainWindow", "Typical Current:", None))
self.name_edit.setText(_translate("MainWindow", "LA T67F", None))
self.label_22.setText(_translate("MainWindow", "Thermal Resistance:", None))
self.label_23.setText(_translate("MainWindow", "Reference Temperature:", None))
self.thermal_resistance_spin.setSuffix(_translate("MainWindow", " K/W", None))
self.label_17.setText(_translate("MainWindow", "Name:", None))
self.reference_temperature_spin.setSuffix(_translate("MainWindow", " °C", None))
self.typical_current_spin.setSuffix(_translate("MainWindow", " mA", None))
self.typical_voltage_spin.setSuffix(_translate("MainWindow", " V", None))
self.family_combo.setItemText(0, _translate("MainWindow", "TOPLED", None))
| gpl-3.0 | 8,376,805,521,591,974,000 | 64.893365 | 190 | 0.706405 | false |
syucream/mrubook | conf.py | 1 | 9757 | # -*- coding: utf-8 -*-
#
# mrubook documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 3 14:17:42 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mrubook'
copyright = u'2016, Ryo Okubo'
author = u'Ryo Okubo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'mrubook v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'mrubookdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mrubook.tex', u'mrubook Documentation',
u'Ryo Okubo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mrubook', u'mrubook Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mrubook', u'mrubook Documentation',
author, 'mrubook', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| mit | -4,150,921,000,730,676,000 | 27.866864 | 80 | 0.691196 | false |
ukch/refugeedata | refugeedata/distribution/decorators.py | 1 | 2228 | import datetime
import functools
from pyratemp import TemplateSyntaxError, TemplateRenderError
from django.contrib.auth import PermissionDenied
from django.shortcuts import get_object_or_404, redirect, render
from refugeedata.models import Distribution, Template
from .forms import DistributionHashForm
def standard_distribution_access(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
dist = get_object_or_404(
Distribution, id=kwargs.pop('distribution_id'))
if not request.user.is_superuser:
if dist.date != datetime.date.today():
raise PermissionDenied()
if not request.user.has_perm("distribution", obj=dist):
if request.method == "POST":
form = DistributionHashForm(dist, request.POST)
if form.is_valid():
request.session["distribution_hash"] = \
form.cleaned_data["password"]
return redirect(request.path)
else:
form = DistributionHashForm(dist)
return render(request, "distribution/login.html", {
"distribution": dist,
"form": form,
})
kwargs['distribution'] = dist
return func(request, *args, **kwargs)
return wrapper
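
# Usage sketch (assumption, not taken from the project's views): the decorator
# pops ``distribution_id`` from the URL kwargs and passes the resolved
# ``distribution`` object to the wrapped view instead, e.g.
#
#     @standard_distribution_access
#     def attendance(request, distribution):
#         return render(request, "distribution/attendance.html",
#                       {"distribution": distribution})
#
# The view name and template path above are hypothetical.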
def handle_template_errors(func):
@functools.wraps(func)
def wrapper(request, distribution_id, *args, **kwargs):
distribution = get_object_or_404(Distribution, id=distribution_id)
try:
return func(request, distribution, *args, **kwargs)
except (TemplateSyntaxError, TemplateRenderError) as e:
if hasattr(e, "filename"):
template_id = e.filename
else:
template_id = kwargs.get("template_id")
if template_id:
template = Template.objects.filter(id=template_id).first()
return render(request, "distribution/template_syntax_error.html", {
"distribution": distribution,
"template": template,
"exception": e,
}, status=400)
return wrapper
| mit | -3,910,878,151,373,112,300 | 35.52459 | 79 | 0.584829 | false |
mvo5/snapcraft | tests/unit/sources/test_mercurial.py | 1 | 9010 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import subprocess
from unittest import mock
from testtools.matchers import Equals
from snapcraft.internal import sources
from tests import unit
from tests.subprocess_utils import call, call_with_output
# LP: #1733584
class TestMercurial(unit.sources.SourceTestCase): # type: ignore
def setUp(self):
super().setUp()
patcher = mock.patch("snapcraft.sources.Mercurial._get_source_details")
self.mock_get_source_details = patcher.start()
self.mock_get_source_details.return_value = ""
self.addCleanup(patcher.stop)
def test_pull(self):
hg = sources.Mercurial("hg://my-source", "source_dir")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "clone", "hg://my-source", "source_dir"]
)
def test_pull_branch(self):
hg = sources.Mercurial(
"hg://my-source", "source_dir", source_branch="my-branch"
)
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "clone", "-u", "my-branch", "hg://my-source", "source_dir"]
)
def test_pull_tag(self):
hg = sources.Mercurial("hg://my-source", "source_dir", source_tag="tag")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "clone", "-u", "tag", "hg://my-source", "source_dir"]
)
def test_pull_commit(self):
hg = sources.Mercurial("hg://my-source", "source_dir", source_commit="2")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "clone", "-u", "2", "hg://my-source", "source_dir"]
)
def test_pull_existing(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial("hg://my-source", "source_dir")
hg.pull()
self.mock_run.assert_called_once_with(["hg", "pull", "hg://my-source"])
def test_pull_existing_with_tag(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial("hg://my-source", "source_dir", source_tag="tag")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "pull", "-r", "tag", "hg://my-source"]
)
def test_pull_existing_with_commit(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial("hg://my-source", "source_dir", source_commit="2")
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "pull", "-r", "2", "hg://my-source"]
)
def test_pull_existing_with_branch(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial(
"hg://my-source", "source_dir", source_branch="my-branch"
)
hg.pull()
self.mock_run.assert_called_once_with(
["hg", "pull", "-b", "my-branch", "hg://my-source"]
)
def test_init_with_source_branch_and_tag_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceIncompatibleOptionsError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_tag="tag",
source_branch="branch",
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.options, Equals(["source-tag", "source-branch"]))
def test_init_with_source_commit_and_tag_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceIncompatibleOptionsError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_commit="2",
source_tag="tag",
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.options, Equals(["source-tag", "source-commit"]))
def test_init_with_source_commit_and_branch_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceIncompatibleOptionsError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_commit="2",
source_branch="branch",
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.options, Equals(["source-branch", "source-commit"]))
def test_init_with_source_depth_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceInvalidOptionError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_depth=2,
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.option, Equals("source-depth"))
def test_source_checksum_raises_exception(self):
raised = self.assertRaises(
sources.errors.SnapcraftSourceInvalidOptionError,
sources.Mercurial,
"hg://mysource",
"source_dir",
source_checksum="md5/d9210476aac5f367b14e513bdefdee08",
)
self.assertThat(raised.source_type, Equals("mercurial"))
self.assertThat(raised.option, Equals("source-checksum"))
def test_has_source_handler_entry(self):
self.assertTrue(sources._source_handler["mercurial"] is sources.Mercurial)
def test_pull_failure(self):
self.mock_run.side_effect = subprocess.CalledProcessError(1, [])
hg = sources.Mercurial("hg://my-source", "source_dir")
raised = self.assertRaises(sources.errors.SnapcraftPullError, hg.pull)
self.assertThat(raised.command, Equals("hg clone hg://my-source source_dir"))
self.assertThat(raised.exit_code, Equals(1))
class MercurialBaseTestCase(unit.TestCase):
def rm_dir(self, dir):
if os.path.exists(dir):
shutil.rmtree(dir)
def clean_dir(self, dir):
self.rm_dir(dir)
os.mkdir(dir)
self.addCleanup(self.rm_dir, dir)
def clone_repo(self, repo, tree):
self.clean_dir(tree)
call(["hg", "clone", repo, tree])
os.chdir(tree)
def add_file(self, filename, body, message):
with open(filename, "w") as fp:
fp.write(body)
call(["hg", "add", filename])
call(["hg", "commit", "-am", message])
def check_file_contents(self, path, expected):
body = None
with open(path) as fp:
body = fp.read()
self.assertThat(body, Equals(expected))
class MercurialDetailsTestCase(MercurialBaseTestCase):
def setUp(self):
super().setUp()
self.working_tree = "hg-test"
self.source_dir = "hg-checkout"
self.clean_dir(self.working_tree)
self.clean_dir(self.source_dir)
os.chdir(self.working_tree)
call(["hg", "init"])
with open("testing", "w") as fp:
fp.write("testing")
call(["hg", "add", "testing"])
call(["hg", "commit", "-m", "testing", "-u", "Test User <[email protected]>"])
call(["hg", "tag", "-u", "test", "test-tag"])
self.expected_commit = call_with_output(["hg", "id"]).split()[0]
self.expected_branch = call_with_output(["hg", "branch"])
self.expected_tag = "test-tag"
os.chdir("..")
self.hg = sources.Mercurial(self.working_tree, self.source_dir, silent=True)
self.hg.pull()
self.source_details = self.hg._get_source_details()
def test_hg_details_commit(self):
self.assertThat(
self.source_details["source-commit"], Equals(self.expected_commit)
)
def test_hg_details_branch(self):
self.clean_dir(self.source_dir)
self.hg = sources.Mercurial(
self.working_tree, self.source_dir, silent=True, source_branch="default"
)
self.hg.pull()
self.source_details = self.hg._get_source_details()
self.assertThat(
self.source_details["source-branch"], Equals(self.expected_branch)
)
def test_hg_details_tag(self):
self.clean_dir(self.source_dir)
self.hg = sources.Mercurial(
self.working_tree, self.source_dir, silent=True, source_tag="test-tag"
)
self.hg.pull()
self.source_details = self.hg._get_source_details()
self.assertThat(self.source_details["source-tag"], Equals(self.expected_tag))
| gpl-3.0 | 7,427,885,006,021,944,000 | 32.87218 | 85 | 0.602442 | false |
Distrotech/reportlab | src/reportlab/platypus/flowables.py | 1 | 72939 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/flowables.py
__version__=''' $Id$ '''
__doc__="""
A flowable is a "floating element" in a document whose exact position is determined by the
other elements that precede it, such as a paragraph, a diagram interspersed between paragraphs,
a section header, etcetera. Examples of non-flowables include page numbering annotations,
headers, footers, fixed diagrams or logos, among others.
Flowables are defined here as objects which know how to determine their size and which
can draw themselves onto a page with respect to a relative "origin" position determined
at a higher level. The object's draw() method should assume that (0,0) corresponds to the
bottom left corner of the enclosing rectangle that will contain the object. The attributes
vAlign and hAlign may be used by 'packers' as hints as to how the object should be placed.
Some Flowables also know how to "split themselves". For example a
long paragraph might split itself between one page and the next.
Packers should set the canv attribute during wrap, split & draw operations to allow
the flowable to work out sizes etc in the proper context.
The "text" of a document usually consists mainly of a sequence of flowables which
flow into a document from top to bottom (with column and page breaks controlled by
higher level components).
"""
import os
from copy import deepcopy, copy
from reportlab.lib.colors import red, gray, lightgrey
from reportlab.lib.rl_accel import fp_str
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.lib.styles import _baseFontName
from reportlab.lib.utils import strTypes
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.rl_config import _FUZZ, overlapAttachedSpace, ignoreContainerActions, listWrapOnFakeWidth
import collections
__all__=('TraceInfo','Flowable','XBox','Preformatted','Image','Spacer','PageBreak','SlowPageBreak',
'CondPageBreak','KeepTogether','Macro','CallerMacro','ParagraphAndImage',
'FailOnWrap','HRFlowable','PTOContainer','KeepInFrame','UseUpSpace',
'ListFlowable','ListItem','DDIndenter','LIIndenter',
'DocAssign', 'DocExec', 'DocAssert', 'DocPara', 'DocIf', 'DocWhile',
'PageBreakIfNotEmpty',
)
class TraceInfo:
"Holder for info about where an object originated"
def __init__(self):
self.srcFile = '(unknown)'
self.startLineNo = -1
self.startLinePos = -1
self.endLineNo = -1
self.endLinePos = -1
#############################################################
# Flowable Objects - a base class and a few examples.
# One is just a box to get some metrics. We also have
# a paragraph, an image and a special 'page break'
# object which fills the space.
#############################################################
class Flowable:
"""Abstract base class for things to be drawn. Key concepts:
1. It knows its size
2. It draws in its own coordinate system (this requires the
        base API to provide a translate() function).
"""
_fixedWidth = 0 #assume wrap results depend on arguments?
_fixedHeight = 0
def __init__(self):
self.width = 0
self.height = 0
self.wrapped = 0
        #these are hints to packers/frames as to how the flowable should be positioned
self.hAlign = 'LEFT' #CENTER/CENTRE or RIGHT
self.vAlign = 'BOTTOM' #MIDDLE or TOP
#optional holder for trace info
self._traceInfo = None
self._showBoundary = None
#many flowables handle text and must be processed in the
#absence of a canvas. tagging them with their encoding
#helps us to get conversions right. Use Python codec names.
self.encoding = None
def _drawOn(self,canv):
'''ensure canv is set on and then draw'''
self.canv = canv
self.draw()#this is the bit you overload
del self.canv
def _hAlignAdjust(self,x,sW=0):
if sW and hasattr(self,'hAlign'):
a = self.hAlign
if a in ('CENTER','CENTRE', TA_CENTER):
x += 0.5*sW
elif a in ('RIGHT',TA_RIGHT):
x += sW
elif a not in ('LEFT',TA_LEFT):
raise ValueError("Bad hAlign value "+str(a))
return x
def drawOn(self, canvas, x, y, _sW=0):
"Tell it to draw itself on the canvas. Do not override"
x = self._hAlignAdjust(x,_sW)
canvas.saveState()
canvas.translate(x, y)
self._drawOn(canvas)
if hasattr(self, '_showBoundary') and self._showBoundary:
#diagnostic tool support
canvas.setStrokeColor(gray)
canvas.rect(0,0,self.width, self.height)
canvas.restoreState()
def wrapOn(self, canv, aW, aH):
        '''intended for use by packers; allows setting the canvas on
        during the actual wrap'''
self.canv = canv
w, h = self.wrap(aW,aH)
del self.canv
return w, h
def wrap(self, availWidth, availHeight):
"""This will be called by the enclosing frame before objects
are asked their size, drawn or whatever. It returns the
size actually used."""
return (self.width, self.height)
def minWidth(self):
"""This should return the minimum required width"""
return getattr(self,'_minWidth',self.width)
def splitOn(self, canv, aW, aH):
        '''intended for use by packers; allows setting the canvas on
        during the actual split'''
self.canv = canv
S = self.split(aW,aH)
del self.canv
return S
def split(self, availWidth, availheight):
"""This will be called by more sophisticated frames when
wrap fails. Stupid flowables should return []. Clever flowables
should split themselves and return a list of flowables.
If they decide that nothing useful can be fitted in the
available space (e.g. if you have a table and not enough
space for the first row), also return []"""
return []
def getKeepWithNext(self):
"""returns boolean determining whether the next flowable should stay with this one"""
if hasattr(self,'keepWithNext'): return self.keepWithNext
elif hasattr(self,'style') and hasattr(self.style,'keepWithNext'): return self.style.keepWithNext
else: return 0
def getSpaceAfter(self):
"""returns how much space should follow this item if another item follows on the same page."""
if hasattr(self,'spaceAfter'): return self.spaceAfter
elif hasattr(self,'style') and hasattr(self.style,'spaceAfter'): return self.style.spaceAfter
else: return 0
def getSpaceBefore(self):
"""returns how much space should precede this item if another item precedess on the same page."""
if hasattr(self,'spaceBefore'): return self.spaceBefore
elif hasattr(self,'style') and hasattr(self.style,'spaceBefore'): return self.style.spaceBefore
else: return 0
def isIndexing(self):
"""Hook for IndexingFlowables - things which have cross references"""
return 0
def identity(self, maxLen=None):
'''
This method should attempt to return a string that can be used to identify
a particular flowable uniquely. The result can then be used for debugging
and or error printouts
'''
if hasattr(self, 'getPlainText'):
r = self.getPlainText(identify=1)
elif hasattr(self, 'text'):
r = str(self.text)
else:
r = '...'
if r and maxLen:
r = r[:maxLen]
return "<%s at %s%s>%s" % (self.__class__.__name__, hex(id(self)), self._frameName(), r)
def _doctemplateAttr(self,a):
return getattr(getattr(getattr(self,'canv',None),'_doctemplate',None),a,None)
def _frameName(self):
f = getattr(self,'_frame',None)
if not f: f = self._doctemplateAttr('frame')
if f and f.id: return ' frame=%s' % f.id
return ''
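# Illustrative sketch (added commentary, not part of the original module): a minimal Flowable
# subclass only needs wrap() to report the size it will use and draw() to render itself
# relative to the (0, 0) bottom-left origin it is given.  The HLine name below is made up.
#
#   class HLine(Flowable):
#       def __init__(self, width, thickness=1):
#           Flowable.__init__(self)
#           self.width, self.height = width, thickness
#       def wrap(self, availWidth, availHeight):
#           return self.width, self.height
#       def draw(self):
#           self.canv.setLineWidth(self.height)
#           self.canv.line(0, 0, self.width, 0)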
class XBox(Flowable):
"""Example flowable - a box with an x through it and a caption.
This has a known size, so does not need to respond to wrap()."""
def __init__(self, width, height, text = 'A Box'):
Flowable.__init__(self)
self.width = width
self.height = height
self.text = text
def __repr__(self):
return "XBox(w=%s, h=%s, t=%s)" % (self.width, self.height, self.text)
def draw(self):
self.canv.rect(0, 0, self.width, self.height)
self.canv.line(0, 0, self.width, self.height)
self.canv.line(0, self.height, self.width, 0)
#centre the text
self.canv.setFont(_baseFontName,12)
self.canv.drawCentredString(0.5*self.width, 0.5*self.height, self.text)
def _trimEmptyLines(lines):
#don't want the first or last to be empty
while len(lines) and lines[0].strip() == '':
lines = lines[1:]
while len(lines) and lines[-1].strip() == '':
lines = lines[:-1]
return lines
def _dedenter(text,dedent=0):
'''
    tidy up text - carefully, it is probably code.  If the code is indented
    within the source script, supply a dedent value and that many leading
    characters will be chopped off each line; otherwise the left edge is
    left intact.
'''
lines = text.split('\n')
if dedent>0:
templines = _trimEmptyLines(lines)
lines = []
for line in templines:
line = line[dedent:].rstrip()
lines.append(line)
else:
lines = _trimEmptyLines(lines)
return lines
SPLIT_CHARS = "[{( ,.;:/\\-"
def splitLines(lines, maximum_length, split_characters, new_line_characters):
if split_characters is None:
split_characters = SPLIT_CHARS
if new_line_characters is None:
new_line_characters = ""
    # Return a list of lines
lines_splitted = []
for line in lines:
if len(line) > maximum_length:
splitLine(line, lines_splitted, maximum_length, \
split_characters, new_line_characters)
else:
lines_splitted.append(line)
return lines_splitted
def splitLine(line_to_split, lines_splitted, maximum_length, \
split_characters, new_line_characters):
# Used to implement the characters added
#at the beginning of each new line created
first_line = True
    # Check if the text can be split
while line_to_split and len(line_to_split)>0:
# Index of the character where we can split
split_index = 0
# Check if the line length still exceeds the maximum length
if len(line_to_split) <= maximum_length:
# Return the remaining of the line
split_index = len(line_to_split)
else:
# Iterate for each character of the line
for line_index in range(maximum_length):
# Check if the character is in the list
# of allowed characters to split on
if line_to_split[line_index] in split_characters:
split_index = line_index + 1
# If the end of the line was reached
# with no character to split on
if split_index==0:
split_index = line_index + 1
if first_line:
lines_splitted.append(line_to_split[0:split_index])
first_line = False
maximum_length -= len(new_line_characters)
else:
lines_splitted.append(new_line_characters + \
line_to_split[0:split_index])
# Remaining text to split
line_to_split = line_to_split[split_index:]
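# Illustrative sketch (added commentary, not part of the original module): splitLines()
# re-wraps any line that exceeds maximum_length, preferring to break just after one of the
# characters in SPLIT_CHARS and prefixing each continuation line with new_line_characters.
# The argument values below are made up.
#
#   wrapped = splitLines(source_text.split('\n'), 60, None, "> ")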
class Preformatted(Flowable):
"""This is like the HTML <PRE> tag.
It attempts to display text exactly as you typed it in a fixed width "typewriter" font.
By default the line breaks are exactly where you put them, and it will not be wrapped.
    You can optionally define a maximum line length, in which case the text will be wrapped,
    and extra characters to insert at the beginning of each wrapped line (e.g. '> ').
"""
def __init__(self, text, style, bulletText = None, dedent=0, maxLineLength=None, splitChars=None, newLineChars=""):
"""text is the text to display. If dedent is set then common leading space
will be chopped off the front (for example if the entire text is indented
6 spaces or more then each line will have 6 spaces removed from the front).
"""
self.style = style
self.bulletText = bulletText
self.lines = _dedenter(text,dedent)
if text and maxLineLength:
self.lines = splitLines(
self.lines,
maxLineLength,
splitChars,
newLineChars
)
def __repr__(self):
bT = self.bulletText
H = "Preformatted("
if bT is not None:
H = "Preformatted(bulletText=%s," % repr(bT)
return "%s'''\\ \n%s''')" % (H, '\n'.join(self.lines))
def wrap(self, availWidth, availHeight):
self.width = availWidth
self.height = self.style.leading*len(self.lines)
return (self.width, self.height)
def minWidth(self):
style = self.style
fontSize = style.fontSize
fontName = style.fontName
return max([stringWidth(line,fontName,fontSize) for line in self.lines])
def split(self, availWidth, availHeight):
#returns two Preformatted objects
#not sure why they can be called with a negative height
if availHeight < self.style.leading:
return []
linesThatFit = int(availHeight * 1.0 / self.style.leading)
text1 = '\n'.join(self.lines[0:linesThatFit])
text2 = '\n'.join(self.lines[linesThatFit:])
style = self.style
if style.firstLineIndent != 0:
style = deepcopy(style)
style.firstLineIndent = 0
return [Preformatted(text1, self.style), Preformatted(text2, style)]
def draw(self):
#call another method for historical reasons. Besides, I
#suspect I will be playing with alternate drawing routines
#so not doing it here makes it easier to switch.
cur_x = self.style.leftIndent
cur_y = self.height - self.style.fontSize
self.canv.addLiteral('%PreformattedPara')
if self.style.textColor:
self.canv.setFillColor(self.style.textColor)
tx = self.canv.beginText(cur_x, cur_y)
#set up the font etc.
tx.setFont( self.style.fontName,
self.style.fontSize,
self.style.leading)
for text in self.lines:
tx.textLine(text)
self.canv.drawText(tx)
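# Illustrative sketch (added commentary, not part of the original module): Preformatted keeps
# the author's own line breaks, which suits code listings; maxLineLength switches on the
# optional re-wrapping implemented by splitLines() above.  The snippet shown is made up.
#
#   from reportlab.lib.styles import getSampleStyleSheet
#   listing = Preformatted("def double(x):\n    return x * 2",
#                          getSampleStyleSheet()['Code'],
#                          maxLineLength=60, newLineChars='. ')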
class Image(Flowable):
"""an image (digital picture). Formats supported by PIL/Java 1.4 (the Python/Java Imaging Library
are supported. Images as flowables may be aligned horizontally in the
frame with the hAlign parameter - accepted values are 'CENTER',
'LEFT' or 'RIGHT' with 'CENTER' being the default.
We allow for two kinds of lazyness to allow for many images in a document
which could lead to file handle starvation.
lazy=1 don't open image until required.
lazy=2 open image when required then shut it.
"""
_fixedWidth = 1
_fixedHeight = 1
def __init__(self, filename, width=None, height=None, kind='direct',
mask="auto", lazy=1, hAlign='CENTER'):
"""If size to draw at not specified, get it from the image."""
self.hAlign = hAlign
self._mask = mask
fp = hasattr(filename,'read')
if fp:
self._file = filename
self.filename = repr(filename)
else:
self._file = self.filename = filename
if not fp and os.path.splitext(filename)[1] in ['.jpg', '.JPG', '.jpeg', '.JPEG']:
# if it is a JPEG, will be inlined within the file -
# but we still need to know its size now
from reportlab.lib.utils import open_for_read
f = open_for_read(filename, 'b')
try:
try:
info = pdfutils.readJPEGInfo(f)
except:
#couldn't read as a JPEG, try like normal
self._setup(width,height,kind,lazy)
return
finally:
f.close()
self.imageWidth = info[0]
self.imageHeight = info[1]
self._img = None
self._setup(width,height,kind,0)
elif fp:
self._setup(width,height,kind,0)
else:
self._setup(width,height,kind,lazy)
def _setup(self,width,height,kind,lazy):
self._lazy = lazy
self._width = width
self._height = height
self._kind = kind
if lazy<=0: self._setup_inner()
def _setup_inner(self):
width = self._width
height = self._height
kind = self._kind
img = self._img
if img: self.imageWidth, self.imageHeight = img.getSize()
if self._lazy>=2: del self._img
if kind in ['direct','absolute']:
self.drawWidth = width or self.imageWidth
self.drawHeight = height or self.imageHeight
elif kind in ['percentage','%']:
self.drawWidth = self.imageWidth*width*0.01
self.drawHeight = self.imageHeight*height*0.01
elif kind in ['bound','proportional']:
factor = min(float(width)/self.imageWidth,float(height)/self.imageHeight)
self.drawWidth = self.imageWidth*factor
self.drawHeight = self.imageHeight*factor
def _restrictSize(self,aW,aH):
if self.drawWidth>aW+_FUZZ or self.drawHeight>aH+_FUZZ:
self._oldDrawSize = self.drawWidth, self.drawHeight
factor = min(float(aW)/self.drawWidth,float(aH)/self.drawHeight)
self.drawWidth *= factor
self.drawHeight *= factor
return self.drawWidth, self.drawHeight
def _unRestrictSize(self):
dwh = getattr(self,'_oldDrawSize',None)
if dwh:
self.drawWidth, self.drawHeight = dwh
def __getattr__(self,a):
if a=='_img':
from reportlab.lib.utils import ImageReader #this may raise an error
self._img = ImageReader(self._file)
if not isinstance(self._file,strTypes):
self._file = None
if self._lazy>=2: self._lazy = 1 #here we're assuming we cannot read again
return self._img
elif a in ('drawWidth','drawHeight','imageWidth','imageHeight'):
self._setup_inner()
return self.__dict__[a]
raise AttributeError("<Image @ 0x%x>.%s" % (id(self),a))
def wrap(self, availWidth, availHeight):
#the caller may decide it does not fit.
return self.drawWidth, self.drawHeight
def draw(self):
lazy = self._lazy
if lazy>=2: self._lazy = 1
self.canv.drawImage( self._img or self.filename,
getattr(self,'_offs_x',0),
getattr(self,'_offs_y',0),
self.drawWidth,
self.drawHeight,
mask=self._mask,
)
if lazy>=2:
self._img = self._file = None
self._lazy = lazy
def identity(self,maxLen=None):
r = Flowable.identity(self,maxLen)
if r[-4:]=='>...' and isinstance(self.filename,str):
r = "%s filename=%s>" % (r[:-4],self.filename)
return r
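# Illustrative sketch (added commentary, not part of the original module): 'kind' controls how
# width/height are interpreted - with kind='proportional' the image is scaled to fit the given
# box while keeping its aspect ratio - and lazy=2 keeps file handles closed between uses.
# The file name below is made up.
#
#   logo = Image("logo.jpg", width=200, height=100, kind='proportional', lazy=2)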
class NullDraw(Flowable):
def draw(self):
pass
class Spacer(NullDraw):
"""A spacer just takes up space and doesn't draw anything - it guarantees
a gap between objects."""
_fixedWidth = 1
_fixedHeight = 1
def __init__(self, width, height, isGlue=False):
self.width = width
if isGlue:
self.height = 1e-4
self.spacebefore = height
self.height = height
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,self.width, self.height)
class UseUpSpace(NullDraw):
def __init__(self):
pass
def __repr__(self):
return "%s()" % self.__class__.__name__
def wrap(self, availWidth, availHeight):
self.width = availWidth
self.height = availHeight
return (availWidth,availHeight-1e-8) #step back a point
class PageBreak(UseUpSpace):
"""Move on to the next page in the document.
This works by consuming all remaining space in the frame!"""
def __init__(self,nextTemplate=None):
self.nextTemplate = nextTemplate
class SlowPageBreak(PageBreak):
pass
class PageBreakIfNotEmpty(PageBreak):
pass
class CondPageBreak(Spacer):
"""use up a frame if not enough vertical space effectively CondFrameBreak"""
def __init__(self, height):
self.height = height
def __repr__(self):
return "CondPageBreak(%s)" %(self.height,)
def wrap(self, availWidth, availHeight):
if availHeight<self.height:
f = self._doctemplateAttr('frame')
if not f: return availWidth, availHeight
from reportlab.platypus.doctemplate import FrameBreak
f.add_generated_content(FrameBreak)
return 0, 0
def identity(self,maxLen=None):
return repr(self).replace(')',',frame=%s)'%self._frameName())
def _listWrapOn(F,availWidth,canv,mergeSpace=1,obj=None,dims=None,fakeWidth=None):
'''return max width, required height for a list of flowables F'''
doct = getattr(canv,'_doctemplate',None)
cframe = getattr(doct,'frame',None)
if fakeWidth is None:
fakeWidth = listWrapOnFakeWidth
if cframe:
from reportlab.platypus.doctemplate import _addGeneratedContent, Indenter
doct_frame = cframe
cframe = doct.frame = deepcopy(doct_frame)
cframe._generated_content = None
del cframe._generated_content
try:
W = 0
H = 0
pS = 0
atTop = 1
F = F[:]
while F:
f = F.pop(0)
if hasattr(f,'frameAction'):
from reportlab.platypus.doctemplate import Indenter
if isinstance(f,Indenter):
availWidth -= f.left+f.right
continue
w,h = f.wrapOn(canv,availWidth,0xfffffff)
if dims is not None: dims.append((w,h))
if cframe:
_addGeneratedContent(F,cframe)
if w<=_FUZZ or h<=_FUZZ: continue
W = max(W,min(w,availWidth) if fakeWidth else w)
H += h
if not atTop:
h = f.getSpaceBefore()
if mergeSpace:
if getattr(f,'_SPACETRANSFER',False):
h = pS
h = max(h-pS,0)
H += h
else:
if obj is not None: obj._spaceBefore = f.getSpaceBefore()
atTop = 0
s = f.getSpaceAfter()
if getattr(f,'_SPACETRANSFER',False):
s = pS
pS = s
H += pS
if obj is not None: obj._spaceAfter = pS
return W, H-pS
finally:
if cframe:
doct.frame = doct_frame
def _flowableSublist(V):
"if it isn't a list or tuple, wrap it in a list"
if not isinstance(V,(list,tuple)): V = V is not None and [V] or []
from reportlab.platypus.doctemplate import LCActionFlowable
assert not [x for x in V if isinstance(x,LCActionFlowable)],'LCActionFlowables not allowed in sublists'
return V
class _ContainerSpace: #Abstract some common container like behaviour
def getSpaceBefore(self):
for c in self._content:
if not hasattr(c,'frameAction'):
return c.getSpaceBefore()
return 0
def getSpaceAfter(self,content=None):
#this needs 2.4
#for c in reversed(content or self._content):
reverseContent = (content or self._content)[:]
reverseContent.reverse()
for c in reverseContent:
if not hasattr(c,'frameAction'):
return c.getSpaceAfter()
return 0
class KeepTogether(_ContainerSpace,Flowable):
def __init__(self,flowables,maxHeight=None):
self._content = _flowableSublist(flowables)
self._maxHeight = maxHeight
def __repr__(self):
f = self._content
L = list(map(repr,f))
L = "\n"+"\n".join(L)
L = L.replace("\n", "\n ")
return "%s(%s,maxHeight=%s)" % (self.__class__.__name__,L,self._maxHeight)
def wrap(self, aW, aH):
dims = []
W,H = _listWrapOn(self._content,aW,self.canv,dims=dims)
self._H = H
self._H0 = dims and dims[0][1] or 0
self._wrapInfo = aW,aH
return W, 0xffffff # force a split
def split(self, aW, aH):
if getattr(self,'_wrapInfo',None)!=(aW,aH): self.wrap(aW,aH)
S = self._content[:]
atTop = getattr(self,'_frame',None)
if atTop: atTop = getattr(atTop,'_atTop',None)
C0 = self._H>aH and (not self._maxHeight or aH>self._maxHeight)
C1 = (self._H0>aH) or C0 and atTop
if C0 or C1:
if C0:
from reportlab.platypus.doctemplate import FrameBreak
A = FrameBreak
else:
from reportlab.platypus.doctemplate import NullActionFlowable
A = NullActionFlowable
S.insert(0,A())
return S
def identity(self, maxLen=None):
msg = "<%s at %s%s> containing :%s" % (self.__class__.__name__,hex(id(self)),self._frameName(),"\n".join([f.identity() for f in self._content]))
if maxLen:
return msg[0:maxLen]
else:
return msg
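# Illustrative sketch (added commentary, not part of the original module): KeepTogether asks
# the enclosing frame to keep a heading and its first paragraph on the same page where
# possible.  The story/styles names are assumed from the sketch near the top of the module.
#
#   story.append(KeepTogether([Paragraph("Results", styles['Heading2']),
#                              Paragraph("The first paragraph of the section...", styles['Normal'])],
#                             maxHeight=400))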
class Macro(Flowable):
"""This is not actually drawn (i.e. it has zero height)
but is executed when it would fit in the frame. Allows direct
access to the canvas through the object 'canvas'"""
def __init__(self, command):
self.command = command
def __repr__(self):
return "Macro(%s)" % repr(self.command)
def wrap(self, availWidth, availHeight):
return (0,0)
def draw(self):
exec(self.command, globals(), {'canvas':self.canv})
def _nullCallable(*args,**kwds):
pass
class CallerMacro(Flowable):
'''
like Macro, but with callable command(s)
drawCallable(self)
wrapCallable(self,aW,aH)
'''
def __init__(self, drawCallable=None, wrapCallable=None):
self._drawCallable = drawCallable or _nullCallable
self._wrapCallable = wrapCallable or _nullCallable
def __repr__(self):
return "CallerMacro(%r,%r)" % (self._drawCallable,self._wrapCallable)
def wrap(self, aW, aH):
self._wrapCallable(self,aW,aH)
return (0,0)
def draw(self):
self._drawCallable(self)
class ParagraphAndImage(Flowable):
'''combine a Paragraph and an Image'''
def __init__(self,P,I,xpad=3,ypad=3,side='right'):
self.P = P
self.I = I
self.xpad = xpad
self.ypad = ypad
self._side = side
def getSpaceBefore(self):
return max(self.P.getSpaceBefore(),self.I.getSpaceBefore())
def getSpaceAfter(self):
return max(self.P.getSpaceAfter(),self.I.getSpaceAfter())
def wrap(self,availWidth,availHeight):
wI, hI = self.I.wrap(availWidth,availHeight)
self.wI = wI
self.hI = hI
# work out widths array for breaking
self.width = availWidth
P = self.P
style = P.style
xpad = self.xpad
ypad = self.ypad
leading = style.leading
leftIndent = style.leftIndent
later_widths = availWidth - leftIndent - style.rightIndent
intermediate_widths = later_widths - xpad - wI
first_line_width = intermediate_widths - style.firstLineIndent
P.width = 0
nIW = int((hI+ypad)/(leading*1.0))
P.blPara = P.breakLines([first_line_width] + nIW*[intermediate_widths]+[later_widths])
if self._side=='left':
self._offsets = [wI+xpad]*(1+nIW)+[0]
P.height = len(P.blPara.lines)*leading
self.height = max(hI,P.height)
return (self.width, self.height)
def split(self,availWidth, availHeight):
P, wI, hI, ypad = self.P, self.wI, self.hI, self.ypad
if hI+ypad>availHeight or len(P.frags)<=0: return []
S = P.split(availWidth,availHeight)
if not S: return S
P = self.P = S[0]
del S[0]
style = P.style
P.height = len(self.P.blPara.lines)*style.leading
self.height = max(hI,P.height)
return [self]+S
def draw(self):
canv = self.canv
if self._side=='left':
self.I.drawOn(canv,0,self.height-self.hI)
self.P._offsets = self._offsets
try:
self.P.drawOn(canv,0,0)
finally:
del self.P._offsets
else:
self.I.drawOn(canv,self.width-self.wI-self.xpad,self.height-self.hI)
self.P.drawOn(canv,0,0)
class FailOnWrap(NullDraw):
def wrap(self, availWidth, availHeight):
raise ValueError("FailOnWrap flowable wrapped and failing as ordered!")
class FailOnDraw(Flowable):
def wrap(self, availWidth, availHeight):
return 0,0
def draw(self):
raise ValueError("FailOnDraw flowable drawn, and failing as ordered!")
class HRFlowable(Flowable):
'''Like the hr tag'''
def __init__(self,
width="80%",
thickness=1,
lineCap='round',
color=lightgrey,
spaceBefore=1, spaceAfter=1,
hAlign='CENTER', vAlign='BOTTOM',
dash=None):
Flowable.__init__(self)
self.width = width
self.lineWidth = thickness
self.lineCap=lineCap
self.spaceBefore = spaceBefore
self.spaceAfter = spaceAfter
self.color = color
self.hAlign = hAlign
self.vAlign = vAlign
self.dash = dash
def __repr__(self):
return "HRFlowable(width=%s, height=%s)" % (self.width, self.height)
def wrap(self, availWidth, availHeight):
w = self.width
if type(w) is type(''):
w = w.strip()
if w.endswith('%'): w = availWidth*float(w[:-1])*0.01
else: w = float(w)
w = min(w,availWidth)
self._width = w
return w, self.lineWidth
def draw(self):
canv = self.canv
canv.saveState()
canv.setLineWidth(self.lineWidth)
canv.setLineCap({'butt':0,'round':1, 'square': 2}[self.lineCap.lower()])
canv.setStrokeColor(self.color)
if self.dash: canv.setDash(self.dash)
canv.line(0, 0, self._width, self.height)
canv.restoreState()
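# Illustrative sketch (added commentary, not part of the original module): a centred
# horizontal rule spanning 80% of the frame width, 1pt thick.  The story name is assumed
# from the sketch near the top of the module.
#
#   story.append(HRFlowable(width="80%", thickness=1, color=lightgrey,
#                           spaceBefore=6, spaceAfter=6))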
class _PTOInfo:
def __init__(self,trailer,header):
self.trailer = _flowableSublist(trailer)
self.header = _flowableSublist(header)
def cdeepcopy(obj):
if hasattr(obj,'deepcopy'):
return obj.deepcopy()
else:
return deepcopy(obj)
class _Container(_ContainerSpace): #Abstract some common container like behaviour
def drawOn(self, canv, x, y, _sW=0, scale=1.0, content=None, aW=None):
'''we simulate being added to a frame'''
from reportlab.platypus.doctemplate import ActionFlowable, Indenter
x0 = x
y0 = y
pS = 0
if aW is None: aW = self.width
aW *= scale
if content is None:
content = self._content
x = self._hAlignAdjust(x,_sW*scale)
y += self.height*scale
yt = y
frame = getattr(self,'_frame',None)
for c in content:
if not ignoreContainerActions and isinstance(c,ActionFlowable):
c.apply(self.canv._doctemplate)
continue
if isinstance(c,Indenter):
x += c.left*scale
aW -= (c.left+c.right)*scale
continue
w, h = c.wrapOn(canv,aW,0xfffffff)
if (w<_FUZZ or h<_FUZZ) and not getattr(c,'_ZEROSIZE',None): continue
if yt!=y:
s = c.getSpaceBefore()
if not getattr(c,'_SPACETRANSFER',False):
h += max(s-pS,0)
y -= h
fbg = getattr(frame,'_frameBGs',None)
s = c.getSpaceAfter()
if getattr(c,'_SPACETRANSFER',False):
s = pS
pS = s
if fbg:
fbgl, fbgr, fbgc = fbg[-1]
fbw = scale*(frame._width-fbgl-fbgr)
fbh = y + h + pS
fby = max(y0,y-pS)
fbh = max(0,fbh-fby)
if abs(fbw)>_FUZZ and abs(fbh)>_FUZZ:
canv.saveState()
canv.setFillColor(fbgc)
canv.rect(x0+scale*(fbgl-frame._leftPadding)-0.1,fby-0.1,fbw+0.2,fbh+0.2,stroke=0,fill=1)
canv.restoreState()
c._frame = frame
c.drawOn(canv,x,y,_sW=aW-w)
if c is not content[-1] and not getattr(c,'_SPACETRANSFER',None):
y -= pS
del c._frame
def copyContent(self,content=None):
C = [].append
for c in (content or self._content):
C(cdeepcopy(c))
self._content = C.__self__
class PTOContainer(_Container,Flowable):
'''PTOContainer(contentList,trailerList,headerList)
A container for flowables decorated with trailer & header lists.
If the split operation would be called then the trailer and header
lists are injected before and after the split. This allows specialist
"please turn over" and "continued from previous" like behaviours.'''
def __init__(self,content,trailer=None,header=None):
I = _PTOInfo(trailer,header)
self._content = C = []
for _ in _flowableSublist(content):
if isinstance(_,PTOContainer):
C.extend(_._content)
else:
C.append(_)
if not hasattr(_,'_ptoinfo'): _._ptoinfo = I
def wrap(self,availWidth,availHeight):
self.width, self.height = _listWrapOn(self._content,availWidth,self.canv)
return self.width,self.height
def split(self, availWidth, availHeight):
from reportlab.platypus.doctemplate import Indenter
if availHeight<0: return []
canv = self.canv
C = self._content
x = i = H = pS = hx = 0
n = len(C)
I2W = {}
dLeft = dRight = 0
for x in xrange(n):
c = C[x]
I = c._ptoinfo
if I not in I2W.keys():
T = I.trailer
Hdr = I.header
tW, tH = _listWrapOn(T, availWidth, self.canv)
if len(T): #trailer may have no content
tSB = T[0].getSpaceBefore()
else:
tSB = 0
I2W[I] = T,tW,tH,tSB
else:
T,tW,tH,tSB = I2W[I]
_, h = c.wrapOn(canv,availWidth,0xfffffff)
if isinstance(c,Indenter):
dw = c.left+c.right
dLeft += c.left
dRight += c.right
availWidth -= dw
pS = 0
hx = 0
else:
if x:
hx = max(c.getSpaceBefore()-pS,0)
h += hx
pS = c.getSpaceAfter()
H += h+pS
tHS = tH+max(tSB,pS)
if H+tHS>=availHeight-_FUZZ: break
i += 1
#first retract last thing we tried
H -= (h+pS)
#attempt a sub split on the last one we have
aH = (availHeight-H-tHS-hx)*0.99999
if aH>=0.05*availHeight:
SS = c.splitOn(canv,availWidth,aH)
else:
SS = []
if abs(dLeft)+abs(dRight)>1e-8:
R1I = [Indenter(-dLeft,-dRight)]
R2I = [Indenter(dLeft,dRight)]
else:
R1I = R2I = []
if not SS:
j = i
while i>1 and C[i-1].getKeepWithNext():
i -= 1
C[i].keepWithNext = 0
if i==1 and C[0].getKeepWithNext():
#robin's black sheep
i = j
C[0].keepWithNext = 0
F = [UseUpSpace()]
if len(SS)>1:
R1 = C[:i]+SS[:1]+R1I+T+F
R2 = Hdr+R2I+SS[1:]+C[i+1:]
elif not i:
return []
else:
R1 = C[:i]+R1I+T+F
R2 = Hdr+R2I+C[i:]
T = R1 + [PTOContainer(R2,[copy(x) for x in I.trailer],[copy(x) for x in I.header])]
return T
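# Illustrative sketch (added commentary, not part of the original module): PTOContainer
# injects the trailer before a forced break and the header after it, giving "please turn
# over" / "continued from previous" behaviour.  The body_flowables/styles names are made up.
#
#   pto = PTOContainer(body_flowables,
#                      trailer=[Paragraph("(continued overleaf)", styles['Italic'])],
#                      header=[Paragraph("(continued)", styles['Italic'])])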
#utility functions used by KeepInFrame
def _hmodel(s0,s1,h0,h1):
# calculate the parameters in the model
# h = a/s**2 + b/s
a11 = 1./s0**2
a12 = 1./s0
a21 = 1./s1**2
a22 = 1./s1
det = a11*a22-a12*a21
b11 = a22/det
b12 = -a12/det
b21 = -a21/det
b22 = a11/det
a = b11*h0+b12*h1
b = b21*h0+b22*h1
return a,b
def _qsolve(h,ab):
'''solve the model v = a/s**2 + b/s for an s which gives us v==h'''
a,b = ab
if abs(a)<=_FUZZ:
return b/h
t = 0.5*b/a
from math import sqrt
f = -h/a
r = t*t-f
if r<0: return None
r = sqrt(r)
if t>=0:
s1 = -t - r
else:
s1 = -t + r
s2 = f/s1
return max(1./s1, 1./s2)
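# Worked note (added commentary, not part of the original module): the shrink search in
# KeepInFrame models the wrapped height as h(s) = a/s**2 + b/s for a scale factor s.
# _hmodel fits (a, b) from two sample points (s0, h0) and (s1, h1); _qsolve then solves
# a/s**2 + b/s = h, i.e. with u = 1/s the quadratic a*u**2 + b*u - h = 0, and returns the
# larger of the two corresponding scale factors s = 1/u.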
class KeepInFrame(_Container,Flowable):
def __init__(self, maxWidth, maxHeight, content=[], mergeSpace=1, mode='shrink', name='',hAlign='LEFT',vAlign='BOTTOM', fakeWidth=None):
'''mode describes the action to take when overflowing
error raise an error in the normal way
            overflow    ignore ie just draw it and report maxWidth, maxHeight
shrink shrinkToFit
truncate fit as much as possible
set fakeWidth to False to make _listWrapOn do the 'right' thing
'''
self.name = name
self.maxWidth = maxWidth
self.maxHeight = maxHeight
self.mode = mode
assert mode in ('error','overflow','shrink','truncate'), '%s invalid mode value %s' % (self.identity(),mode)
assert maxHeight>=0, '%s invalid maxHeight value %s' % (self.identity(),maxHeight)
if mergeSpace is None: mergeSpace = overlapAttachedSpace
self.mergespace = mergeSpace
self._content = content or []
self.vAlign = vAlign
self.hAlign = hAlign
self.fakeWidth = fakeWidth
def _getAvailableWidth(self):
return self.maxWidth - self._leftExtraIndent - self._rightExtraIndent
def identity(self, maxLen=None):
return "<%s at %s%s%s> size=%sx%s" % (self.__class__.__name__, hex(id(self)), self._frameName(),
getattr(self,'name','') and (' name="%s"'% getattr(self,'name','')) or '',
getattr(self,'maxWidth','') and (' maxWidth=%s'%fp_str(getattr(self,'maxWidth',0))) or '',
getattr(self,'maxHeight','')and (' maxHeight=%s' % fp_str(getattr(self,'maxHeight')))or '')
def wrap(self,availWidth,availHeight):
from reportlab.platypus.doctemplate import LayoutError
mode = self.mode
maxWidth = float(min(self.maxWidth or availWidth,availWidth))
maxHeight = float(min(self.maxHeight or availHeight,availHeight))
fakeWidth = self.fakeWidth
W, H = _listWrapOn(self._content,maxWidth,self.canv, fakeWidth=fakeWidth)
if (mode=='error' and (W>maxWidth+_FUZZ or H>maxHeight+_FUZZ)):
ident = 'content %sx%s too large for %s' % (W,H,self.identity(30))
#leave to keep apart from the raise
raise LayoutError(ident)
elif W<=maxWidth+_FUZZ and H<=maxHeight+_FUZZ:
self.width = W-_FUZZ #we take what we get
self.height = H-_FUZZ
elif mode in ('overflow','truncate'): #we lie
self.width = min(maxWidth,W)-_FUZZ
self.height = min(maxHeight,H)-_FUZZ
else:
def func(x):
x = float(x)
W, H = _listWrapOn(self._content,x*maxWidth,self.canv, fakeWidth=fakeWidth)
W /= x
H /= x
return W, H
W0 = W
H0 = H
s0 = 1
if W>maxWidth+_FUZZ:
#squeeze out the excess width and or Height
s1 = W/maxWidth #linear model
W, H = func(s1)
if H<=maxHeight+_FUZZ:
self.width = W-_FUZZ
self.height = H-_FUZZ
self._scale = s1
return W,H
s0 = s1
H0 = H
W0 = W
s1 = H/maxHeight
W, H = func(s1)
self.width = W-_FUZZ
self.height = H-_FUZZ
self._scale = s1
if H<min(0.95*maxHeight,maxHeight-10) or H>=maxHeight+_FUZZ:
#the standard case W should be OK, H is short we want
#to find the smallest s with H<=maxHeight
H1 = H
for f in 0, 0.01, 0.05, 0.10, 0.15:
#apply the quadratic model
s = _qsolve(maxHeight*(1-f),_hmodel(s0,s1,H0,H1))
W, H = func(s)
if H<=maxHeight+_FUZZ and W<=maxWidth+_FUZZ:
self.width = W-_FUZZ
self.height = H-_FUZZ
self._scale = s
break
return self.width, self.height
def drawOn(self, canv, x, y, _sW=0):
scale = getattr(self,'_scale',1.0)
truncate = self.mode=='truncate'
ss = scale!=1.0 or truncate
if ss:
canv.saveState()
if truncate:
p = canv.beginPath()
p.rect(x, y, self.width,self.height)
canv.clipPath(p,stroke=0)
else:
canv.translate(x,y)
x=y=0
canv.scale(1.0/scale, 1.0/scale)
_Container.drawOn(self, canv, x, y, _sW=_sW, scale=scale)
if ss: canv.restoreState()
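# Illustrative sketch (added commentary, not part of the original module): KeepInFrame with
# mode='shrink' scales its content down until it fits the given box instead of raising a
# LayoutError.  The long_text/styles names are made up.
#
#   boxed = KeepInFrame(maxWidth=300, maxHeight=200,
#                       content=[Paragraph(long_text, styles['Normal'])],
#                       mode='shrink')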
class ImageAndFlowables(_Container,Flowable):
'''combine a list of flowables and an Image'''
def __init__(self,I,F,imageLeftPadding=0,imageRightPadding=3,imageTopPadding=0,imageBottomPadding=3,
imageSide='right', imageHref=None):
self._content = _flowableSublist(F)
self._I = I
self._irpad = imageRightPadding
self._ilpad = imageLeftPadding
self._ibpad = imageBottomPadding
self._itpad = imageTopPadding
self._side = imageSide
self.imageHref = imageHref
def deepcopy(self):
c = copy(self) #shallow
self._reset()
c.copyContent() #partially deep?
return c
def getSpaceAfter(self):
if hasattr(self,'_C1'):
C = self._C1
elif hasattr(self,'_C0'):
C = self._C0
else:
C = self._content
return _Container.getSpaceAfter(self,C)
def getSpaceBefore(self):
return max(self._I.getSpaceBefore(),_Container.getSpaceBefore(self))
def _reset(self):
for a in ('_wrapArgs','_C0','_C1'):
try:
delattr(self,a)
except:
pass
def wrap(self,availWidth,availHeight):
canv = self.canv
I = self._I
if hasattr(self,'_wrapArgs'):
if self._wrapArgs==(availWidth,availHeight) and getattr(I,'_oldDrawSize',None) is None:
return self.width,self.height
self._reset()
I._unRestrictSize()
self._wrapArgs = availWidth, availHeight
I.wrap(availWidth,availHeight)
wI, hI = I._restrictSize(availWidth,availHeight)
self._wI = wI
self._hI = hI
ilpad = self._ilpad
irpad = self._irpad
ibpad = self._ibpad
itpad = self._itpad
self._iW = iW = availWidth - irpad - wI - ilpad
aH = itpad + hI + ibpad
if iW>_FUZZ:
W,H0,self._C0,self._C1 = self._findSplit(canv,iW,aH)
else:
W = availWidth
H0 = 0
if W>iW+_FUZZ:
self._C0 = []
self._C1 = self._content
aH = self._aH = max(aH,H0)
self.width = availWidth
if not self._C1:
self.height = aH
else:
W1,H1 = _listWrapOn(self._C1,availWidth,canv)
self.height = aH+H1
return self.width, self.height
def split(self,availWidth, availHeight):
if hasattr(self,'_wrapArgs'):
I = self._I
if self._wrapArgs!=(availWidth,availHeight) or getattr(I,'_oldDrawSize',None) is not None:
self._reset()
I._unRestrictSize()
W,H=self.wrap(availWidth,availHeight)
if self._aH>availHeight: return []
C1 = self._C1
if C1:
S = C1[0].split(availWidth,availHeight-self._aH)
if not S:
_C1 = []
else:
_C1 = [S[0]]
C1 = S[1:]+C1[1:]
else:
_C1 = []
return [ImageAndFlowables(
self._I,
self._C0+_C1,
imageLeftPadding=self._ilpad,
imageRightPadding=self._irpad,
imageTopPadding=self._itpad,
imageBottomPadding=self._ibpad,
imageSide=self._side, imageHref=self.imageHref)
]+C1
def drawOn(self, canv, x, y, _sW=0):
if self._side=='left':
Ix = x + self._ilpad
Fx = Ix+ self._irpad + self._wI
else:
Ix = x + self.width-self._wI-self._irpad
Fx = x
self._I.drawOn(canv,Ix,y+self.height-self._itpad-self._hI)
if self.imageHref:
canv.linkURL(self.imageHref, (Ix, y+self.height-self._itpad-self._hI, Ix + self._wI, y+self.height), relative=1)
if self._C0:
_Container.drawOn(self, canv, Fx, y, content=self._C0, aW=self._iW)
if self._C1:
aW, aH = self._wrapArgs
_Container.drawOn(self, canv, x, y-self._aH,content=self._C1, aW=aW)
def _findSplit(self,canv,availWidth,availHeight,mergeSpace=1,obj=None):
        '''return max width, consumed height, and the content split into the part that fits beside the image and the remainder'''
W = 0
H = 0
pS = sB = 0
atTop = 1
F = self._content
for i,f in enumerate(F):
w,h = f.wrapOn(canv,availWidth,0xfffffff)
if w<=_FUZZ or h<=_FUZZ: continue
W = max(W,w)
if not atTop:
s = f.getSpaceBefore()
if mergeSpace: s = max(s-pS,0)
H += s
else:
if obj is not None: obj._spaceBefore = f.getSpaceBefore()
atTop = 0
if H>=availHeight or w>availWidth:
return W, availHeight, F[:i],F[i:]
H += h
if H>availHeight:
from reportlab.platypus.paragraph import Paragraph
aH = availHeight-(H-h)
if isinstance(f,(Paragraph,Preformatted)):
leading = f.style.leading
nH = leading*int(aH/float(leading))+_FUZZ
if nH<aH: nH += leading
availHeight += nH-aH
aH = nH
S = cdeepcopy(f).splitOn(canv,availWidth,aH)
if not S:
return W, availHeight, F[:i],F[i:]
else:
return W,availHeight,F[:i]+S[:1],S[1:]+F[i+1:]
pS = f.getSpaceAfter()
H += pS
if obj is not None: obj._spaceAfter = pS
return W, H-pS, F, []
class AnchorFlowable(Spacer):
'''create a bookmark in the pdf'''
_ZEROSIZE=1
_SPACETRANSFER = True
def __init__(self,name):
Spacer.__init__(self,0,0)
self._name = name
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,self._name)
def wrap(self,aW,aH):
return 0,0
def draw(self):
self.canv.bookmarkHorizontal(self._name,0,0)
class FrameBG(AnchorFlowable):
"""Start or stop coloring the frame background
left & right are distances from the edge of the frame to start stop colouring.
"""
_ZEROSIZE=1
def __init__(self, color=None, left=0, right=0, start=True):
Spacer.__init__(self,0,0)
self.start = start
if start:
from reportlab.platypus.doctemplate import _evalMeasurement
self.left = _evalMeasurement(left)
self.right = _evalMeasurement(right)
self.color = color
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,', '.join(['%s=%r' % (i,getattr(self,i,None)) for i in 'start color left right'.split()]))
def draw(self):
frame = getattr(self,'_frame',None)
if frame is None: return
if self.start:
w = getattr(frame,'_lineWidth',0)
frame._frameBGs.append((self.left,self.right,self.color))
elif frame._frameBGs:
frame._frameBGs.pop()
class FrameSplitter(NullDraw):
'''When encountered this flowable should either switch directly to nextTemplate
if remaining space in the current frame is less than gap+required or it should
temporarily modify the current template to have the frames from nextTemplate
that are listed in nextFrames and switch to the first of those frames.
'''
_ZEROSIZE=1
def __init__(self,nextTemplate,nextFrames=[],gap=10,required=72):
self.nextTemplate=nextTemplate
self.nextFrames=nextFrames or []
self.gap=gap
self.required=required
def wrap(self,aW,aH):
frame = self._frame
from reportlab.platypus.doctemplate import NextPageTemplate,CurrentFrameFlowable,LayoutError
G=[NextPageTemplate(self.nextTemplate)]
if aH<self.gap+self.required-_FUZZ:
#we are going straight to the nextTemplate with no attempt to modify the frames
G.append(PageBreak())
else:
#we are going to modify the incoming templates
templates = self._doctemplateAttr('pageTemplates')
if templates is None:
raise LayoutError('%s called in non-doctemplate environment'%self.identity())
T=[t for t in templates if t.id==self.nextTemplate]
if not T:
raise LayoutError('%s.nextTemplate=%s not found' % (self.identity(),self.nextTemplate))
T=T[0]
F=[f for f in T.frames if f.id in self.nextFrames]
N=[f.id for f in F]
N=[f for f in self.nextFrames if f not in N]
if N:
raise LayoutError('%s frames=%r not found in pageTemplate(%s)\n%r has frames %r' % (self.identity(),N,T.id,T,[f.id for f in T.frames]))
T=self._doctemplateAttr('pageTemplate')
def unwrap(canv,doc,T=T,onPage=T.onPage,oldFrames=T.frames):
T.frames=oldFrames
T.onPage=onPage
onPage(canv,doc)
T.onPage=unwrap
h=aH-self.gap
for i,f in enumerate(F):
f=copy(f)
f.height=h
f._reset()
F[i]=f
T.frames=F
G.append(CurrentFrameFlowable(F[0].id))
frame.add_generated_content(*G)
return 0,0
from reportlab.lib.sequencer import _type2formatter
_bulletNames = dict(
bulletchar=u'\u2022', #usually a small circle
circle=u'\u25cf', #circle as high as the font
square=u'\u25a0',
disc=u'\u25cf',
diamond=u'\u25c6',
rarrowhead=u'\u27a4',
)
def _bulletFormat(value,type='1',format=None):
if type=='bullet':
s = _bulletNames.get(value,value)
else:
s = _type2formatter[type](int(value))
if format:
if isinstance(format,str):
s = format % s
elif isinstance(format, collections.Callable):
s = format(s)
else:
raise ValueError('unexpected BulletDrawer format %r' % format)
return s
class BulletDrawer:
def __init__(self,
value='0',
bulletAlign='left',
bulletType='1',
bulletColor='black',
bulletFontName='Helvetica',
bulletFontSize=12,
bulletOffsetY=0,
bulletDedent=0,
bulletDir='ltr',
bulletFormat=None,
):
self.value = value
self._bulletAlign = bulletAlign
self._bulletType = bulletType
self._bulletColor = bulletColor
self._bulletFontName = bulletFontName
self._bulletFontSize = bulletFontSize
self._bulletOffsetY = bulletOffsetY
self._bulletDedent = bulletDedent
self._bulletDir = bulletDir
self._bulletFormat = bulletFormat
def drawOn(self,indenter,canv,x,y,_sW=0):
value = self.value
if not value: return
canv.saveState()
canv.translate(x, y)
y = indenter.height-self._bulletFontSize+self._bulletOffsetY
if self._bulletDir=='rtl':
x = indenter.width - indenter._rightIndent + self._bulletDedent
else:
x = indenter._leftIndent - self._bulletDedent
canv.setFont(self._bulletFontName,self._bulletFontSize)
canv.setFillColor(self._bulletColor)
bulletAlign = self._bulletAlign
value = _bulletFormat(value,self._bulletType,self._bulletFormat)
if bulletAlign=='left':
canv.drawString(x,y,value)
elif bulletAlign=='right':
canv.drawRightString(x,y,value)
elif bulletAlign in ('center','centre'):
canv.drawCentredString(x,y,value)
elif bulletAlign.startswith('numeric') or bulletAlign.startswith('decimal'):
pc = bulletAlign[7:].strip() or '.'
canv.drawAlignedString(x,y,value,pc)
else:
raise ValueError('Invalid bulletAlign: %r' % bulletAlign)
canv.restoreState()
def _computeBulletWidth(b,value):
value = _bulletFormat(value,b._bulletType,b._bulletFormat)
return stringWidth(value,b._bulletFontName,b._bulletFontSize)
class DDIndenter(Flowable):
_IndenterAttrs = '_flowable _leftIndent _rightIndent width height'.split()
def __init__(self,flowable,leftIndent=0,rightIndent=0):
self._flowable = flowable
self._leftIndent = leftIndent
self._rightIndent = rightIndent
self.width = None
self.height = None
def split(self, aW, aH):
S = self._flowable.split(aW-self._leftIndent-self._rightIndent, aH)
return [
DDIndenter(s,
leftIndent=self._leftIndent,
rightIndent=self._rightIndent,
) for s in S
]
def drawOn(self, canv, x, y, _sW=0):
self._flowable.drawOn(canv,x+self._leftIndent,y,max(0,_sW-self._leftIndent-self._rightIndent))
def wrap(self, aW, aH):
w,h = self._flowable.wrap(aW-self._leftIndent-self._rightIndent, aH)
self.width = w+self._leftIndent+self._rightIndent
self.height = h
return self.width,h
def __getattr__(self,a):
if a in self._IndenterAttrs:
try:
return self.__dict__[a]
except KeyError:
if a not in ('spaceBefore','spaceAfter'):
raise
return getattr(self._flowable,a)
def __setattr__(self,a,v):
if a in self._IndenterAttrs:
self.__dict__[a] = v
else:
setattr(self._flowable,a,v)
def __delattr__(self,a):
if a in self._IndenterAttrs:
del self.__dict__[a]
else:
delattr(self._flowable,a)
def identity(self,maxLen=None):
return '%s containing %s' % (self.__class__.__name__,self._flowable.identity(maxLen))
class LIIndenter(DDIndenter):
_IndenterAttrs = '_flowable _bullet _leftIndent _rightIndent width height spaceBefore spaceAfter'.split()
def __init__(self,flowable,leftIndent=0,rightIndent=0,bullet=None, spaceBefore=None, spaceAfter=None):
self._flowable = flowable
self._bullet = bullet
self._leftIndent = leftIndent
self._rightIndent = rightIndent
self.width = None
self.height = None
if spaceBefore is not None:
self.spaceBefore = spaceBefore
if spaceAfter is not None:
self.spaceAfter = spaceAfter
def split(self, aW, aH):
S = self._flowable.split(aW-self._leftIndent-self._rightIndent, aH)
return [
LIIndenter(s,
leftIndent=self._leftIndent,
rightIndent=self._rightIndent,
bullet = (s is S[0] and self._bullet or None),
) for s in S
]
def drawOn(self, canv, x, y, _sW=0):
if self._bullet:
self._bullet.drawOn(self,canv,x,y,0)
self._flowable.drawOn(canv,x+self._leftIndent,y,max(0,_sW-self._leftIndent-self._rightIndent))
from reportlab.lib.styles import ListStyle
class ListItem:
def __init__(self,
flowables, #the initial flowables
style=None,
#leftIndent=18,
#rightIndent=0,
#spaceBefore=None,
#spaceAfter=None,
#bulletType='1',
#bulletColor='black',
#bulletFontName='Helvetica',
#bulletFontSize=12,
#bulletOffsetY=0,
#bulletDedent='auto',
#bulletDir='ltr',
#bulletFormat=None,
**kwds
):
if not isinstance(flowables,(list,tuple)):
flowables = (flowables,)
self._flowables = flowables
params = self._params = {}
if style:
if not isinstance(style,ListStyle):
raise ValueError('%s style argument (%r) not a ListStyle' % (self.__class__.__name__,style))
self._style = style
for k in ListStyle.defaults:
if k in kwds:
v = kwds.get(k)
elif style:
v = getattr(style,k)
else:
continue
params[k] = v
for k in ('value', 'spaceBefore','spaceAfter'):
v = kwds.get(k,getattr(style,k,None))
if v is not None:
params[k] = v
class _LIParams:
def __init__(self,flowable,params,value,first):
self.flowable = flowable
self.params = params
self.value = value
self.first= first
class ListFlowable(_Container,Flowable):
def __init__(self,
flowables, #the initial flowables
start=1,
style=None,
#leftIndent=18,
#rightIndent=0,
#spaceBefore=None,
#spaceAfter=None,
#bulletType='1',
#bulletColor='black',
#bulletFontName='Helvetica',
#bulletFontSize=12,
#bulletOffsetY=0,
#bulletDedent='auto',
#bulletDir='ltr',
#bulletFormat=None,
**kwds
):
self._flowables = flowables
if style:
if not isinstance(style,ListStyle):
raise ValueError('%s style argument not a ListStyle' % self.__class__.__name__)
self.style = style
for k,v in ListStyle.defaults.items():
setattr(self,'_'+k,kwds.get(k,getattr(style,k,v)))
if start is None:
start = getattr(self,'_start',None)
if start is None:
if getattr(self,'_bulletType','1')=='bullet':
start = 'bulletchar'
else:
start = '1'
self._start = start
for k in ('spaceBefore','spaceAfter'):
v = kwds.get(k,getattr(style,k,None))
if v is not None:
setattr(self,k,v)
self._content = self._getContent()
del self._flowables
self._dims = None
def wrap(self,aW,aH):
if self._dims!=aW:
self.width, self.height = _listWrapOn(self._content,aW,self.canv)
self._dims = aW
return self.width,self.height
def split(self,aW,aH):
return self._content
def _flowablesIter(self):
for f in self._flowables:
if isinstance(f,(list,tuple)):
if f:
for i, z in enumerate(f):
yield i==0 and not isinstance(z,LIIndenter), z
elif isinstance(f,ListItem):
params = f._params
if not params:
#meerkat simples just a list like object
for i, z in enumerate(f._flowables):
if isinstance(z,LIIndenter):
raise ValueError('LIIndenter not allowed in ListItem')
yield i==0, z
else:
params = params.copy()
value = params.pop('value',None)
spaceBefore = params.pop('spaceBefore',None)
spaceAfter = params.pop('spaceAfter',None)
n = len(f._flowables) - 1
for i, z in enumerate(f._flowables):
P = params.copy()
if not i and spaceBefore is not None:
P['spaceBefore'] = spaceBefore
if i==n and spaceAfter is not None:
P['spaceAfter'] = spaceAfter
if i: value=None
yield 0, _LIParams(z,P,value,i==0)
else:
yield not isinstance(f,LIIndenter), f
def _makeLIIndenter(self,flowable, bullet, params=None):
if params:
leftIndent = params.get('leftIndent',self._leftIndent)
rightIndent = params.get('rightIndent',self._rightIndent)
spaceBefore = params.get('spaceBefore',None)
spaceAfter = params.get('spaceAfter',None)
return LIIndenter(flowable,leftIndent,rightIndent,bullet,spaceBefore=spaceBefore,spaceAfter=spaceAfter)
else:
return LIIndenter(flowable,self._leftIndent,self._rightIndent,bullet)
def _makeBullet(self,value,params=None):
if params is None:
def getp(a):
return getattr(self,'_'+a)
else:
style = getattr(params,'style',None)
def getp(a):
if a in params: return params[a]
if style and a in style.__dict__: return getattr(self,a)
return getattr(self,'_'+a)
return BulletDrawer(
value=value,
bulletAlign=getp('bulletAlign'),
bulletType=getp('bulletType'),
bulletColor=getp('bulletColor'),
bulletFontName=getp('bulletFontName'),
bulletFontSize=getp('bulletFontSize'),
bulletOffsetY=getp('bulletOffsetY'),
bulletDedent=getp('calcBulletDedent'),
bulletDir=getp('bulletDir'),
bulletFormat=getp('bulletFormat'),
)
def _getContent(self):
value = self._start
bt = self._bulletType
inc = int(bt in '1aAiI')
if inc: value = int(value)
bd = self._bulletDedent
if bd=='auto':
align = self._bulletAlign
dir = self._bulletDir
if dir=='ltr' and align=='left':
bd = self._leftIndent
elif align=='right':
bd = self._rightIndent
else:
#we need to work out the maximum width of any of the labels
tvalue = value
maxW = 0
for d,f in self._flowablesIter():
if d:
maxW = max(maxW,_computeBulletWidth(self,tvalue))
if inc: tvalue += inc
elif isinstance(f,LIIndenter):
b = f._bullet
if b:
if b.bulletType==bt:
maxW = max(maxW,_computeBulletWidth(b,b.value))
tvalue = int(b.value)
else:
maxW = max(maxW,_computeBulletWidth(self,tvalue))
if inc: tvalue += inc
if dir=='ltr':
if align=='right':
bd = self._leftIndent - maxW
else:
bd = self._leftIndent - maxW*0.5
elif align=='left':
bd = self._rightIndent - maxW
else:
bd = self._rightIndent - maxW*0.5
self._calcBulletDedent = bd
S = []
aS = S.append
i=0
for d,f in self._flowablesIter():
fparams = {}
if not i:
i += 1
spaceBefore = getattr(self,'spaceBefore',None)
if spaceBefore is not None:
fparams['spaceBefore'] = spaceBefore
if d:
aS(self._makeLIIndenter(f,bullet=self._makeBullet(value),params=fparams))
if inc: value += inc
elif isinstance(f,LIIndenter):
b = f._bullet
if b:
if b.bulletType!=bt:
raise ValueError('Included LIIndenter bulletType=%s != OrderedList bulletType=%s' % (b.bulletType,bt))
value = int(b.value)
else:
f._bullet = self._makeBullet(value,params=getattr(f,'params',None))
if fparams:
f.__dict__['spaceBefore'] = max(f.__dict__.get('spaceBefore',0),spaceBefore)
aS(f)
if inc: value += inc
elif isinstance(f,_LIParams):
fparams.update(f.params)
z = self._makeLIIndenter(f.flowable,bullet=None,params=fparams)
if f.first:
if f.value is not None:
value = f.value
if inc: value = int(value)
z._bullet = self._makeBullet(value,f.params)
if inc: value += inc
aS(z)
else:
aS(self._makeLIIndenter(f,bullet=None,params=fparams))
spaceAfter = getattr(self,'spaceAfter',None)
if spaceAfter is not None:
f=S[-1]
f.__dict__['spaceAfter'] = max(f.__dict__.get('spaceAfter',0),spaceAfter)
return S
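# Illustrative sketch (added commentary, not part of the original module): ListFlowable
# numbers or bullets its items, and an individual ListItem can override bullet properties
# such as the colour.  The story/styles names are assumed from the sketch near the top of
# the module.
#
#   items = [Paragraph("first", styles['Normal']),
#            ListItem(Paragraph("second, highlighted", styles['Normal']), bulletColor='red'),
#            Paragraph("third", styles['Normal'])]
#   story.append(ListFlowable(items, bulletType='1', start=1))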
class TopPadder(Flowable):
'''wrap a single flowable so that its first bit will be
padded to fill out the space so that it appears at the
bottom of its frame'''
def __init__(self,f):
self.__dict__['_TopPadder__f'] = f
def wrap(self,aW,aH):
w,h = self.__f.wrap(aW,aH)
self.__dict__['_TopPadder__dh'] = aH-h
return w,h
def split(self,aW,aH):
S = self.__f.split(aW,aH)
if len(S)>1:
S[0] = TopPadder(S[0])
return S
def drawOn(self, canvas, x, y, _sW=0):
self.__f.drawOn(canvas,x,y-max(0,self.__dh-1e-8),_sW)
def __setattr__(self,a,v):
setattr(self.__f,a,v)
def __getattr__(self,a):
return getattr(self.__f,a)
def __delattr__(self,a):
delattr(self.__f,a)
class DocAssign(NullDraw):
'''At wrap time this flowable evaluates var=expr in the doctemplate namespace'''
_ZEROSIZE=1
def __init__(self,var,expr,life='forever'):
Flowable.__init__(self)
self.args = var,expr,life
def funcWrap(self,aW,aH):
NS=self._doctemplateAttr('_nameSpace')
NS.update(dict(availableWidth=aW,availableHeight=aH))
try:
return self.func()
finally:
for k in 'availableWidth','availableHeight':
try:
del NS[k]
except:
pass
def func(self):
return self._doctemplateAttr('d'+self.__class__.__name__[1:])(*self.args)
def wrap(self,aW,aH):
self.funcWrap(aW,aH)
return 0,0
class DocExec(DocAssign):
'''at wrap time exec stmt in doc._nameSpace'''
def __init__(self,stmt,lifetime='forever'):
Flowable.__init__(self)
self.args=stmt,lifetime
class DocPara(DocAssign):
    '''at wrap time create a paragraph with the value of expr as text.
    If format is specified it should use %(__expr__)s for string interpolation
    of the expression expr (if any).  It may also use %(name)s interpolations
    for other variables in the namespace.
    Suitable defaults will be used if style and klass are None.
'''
def __init__(self,expr,format=None,style=None,klass=None,escape=True):
Flowable.__init__(self)
self.expr=expr
self.format=format
self.style=style
self.klass=klass
self.escape=escape
def func(self):
expr = self.expr
if expr:
if not isinstance(expr,str): expr = str(expr)
return self._doctemplateAttr('docEval')(expr)
def add_content(self,*args):
self._doctemplateAttr('frame').add_generated_content(*args)
def get_value(self,aW,aH):
value = self.funcWrap(aW,aH)
if self.format:
NS=self._doctemplateAttr('_nameSpace').copy()
NS.update(dict(availableWidth=aW,availableHeight=aH))
NS['__expr__'] = value
value = self.format % NS
else:
value = str(value)
return value
def wrap(self,aW,aH):
value = self.get_value(aW,aH)
P = self.klass
if not P:
from reportlab.platypus.paragraph import Paragraph as P
style = self.style
if not style:
from reportlab.lib.styles import getSampleStyleSheet
style=getSampleStyleSheet()['Code']
if self.escape:
from xml.sax.saxutils import escape
value=escape(value)
self.add_content(P(value,style=style))
return 0,0
class DocAssert(DocPara):
def __init__(self,cond,format=None):
Flowable.__init__(self)
self.expr=cond
self.format=format
def funcWrap(self,aW,aH):
self._cond = DocPara.funcWrap(self,aW,aH)
return self._cond
def wrap(self,aW,aH):
value = self.get_value(aW,aH)
if not bool(self._cond):
raise AssertionError(value)
return 0,0
class DocIf(DocPara):
def __init__(self,cond,thenBlock,elseBlock=[]):
Flowable.__init__(self)
self.expr = cond
self.blocks = elseBlock or [],thenBlock
def checkBlock(self,block):
if not isinstance(block,(list,tuple)):
block = (block,)
return block
def wrap(self,aW,aH):
self.add_content(*self.checkBlock(self.blocks[int(bool(self.funcWrap(aW,aH)))]))
return 0,0
class DocWhile(DocIf):
def __init__(self,cond,whileBlock):
Flowable.__init__(self)
self.expr = cond
self.block = self.checkBlock(whileBlock)
def wrap(self,aW,aH):
if bool(self.funcWrap(aW,aH)):
self.add_content(*(list(self.block)+[self]))
return 0,0
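# Illustrative sketch (added commentary, not part of the original module): the Doc* flowables
# are evaluated in the doctemplate namespace at wrap time, so values computed during the
# build can be checked or injected into the story.  The variable name 'chapter' is made up.
#
#   story.append(DocAssign('chapter', '3'))
#   story.append(DocPara('chapter', format='This is chapter %(__expr__)s'))
#   story.append(DocAssert('chapter==3', format='chapter should be 3, got %(__expr__)s'))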
class SetTopFlowables(NullDraw):
    _ZEROSIZE = 1
def __init__(self,F,show=False):
self._F = F
self._show = show
def wrap(self,aW,aH):
doc = getattr(getattr(self,'canv',None),'_doctemplate',None)
if doc:
doc._topFlowables=self._F
if self._show and self._F:
doc.frame._generated_content = self._F
return 0,0
| bsd-3-clause | 4,462,128,863,631,193,600 | 35.542585 | 152 | 0.5512 | false |
blendit/crowd | GUI_crowd_MapPanel.py | 1 | 6603 | import bpy
from bpy.types import Menu, Panel
from bpy.props import *
import os
import sys
import subprocess
import ast
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(script_dir)
# Get system's python path
proc = subprocess.Popen('python3 -c "import sys; print(sys.path)"', stdout=subprocess.PIPE, shell=True)
out, err = proc.communicate()
paths = ast.literal_eval(out.decode("utf-8"))
sys.path += (paths)
import blendit.SimulationData as Sim
import pickle as pic
def initSceneProperties(scn):
bpy.types.Scene.PosX = FloatProperty(
name="X",
description="position of the origin")
scn['PosX'] = 0
bpy.types.Scene.PosY = FloatProperty(
name="Y",
description="position of the origin")
scn['PosY'] = 0
bpy.types.Scene.MinX = FloatProperty(
name="Min",
description="Bound of the map")
scn['MinX'] = -float("inf")
bpy.types.Scene.MaxX = FloatProperty(
name="Max",
description="Bound of the map")
scn['MaxX'] = float("inf")
bpy.types.Scene.MinY = FloatProperty(
name="Max",
description="Bound of the map")
scn['MinY'] = -float("inf")
bpy.types.Scene.MaxY = FloatProperty(
name="Max",
description="Bound of the map")
scn['MaxY'] = float("inf")
bpy.types.Scene.GridP = FloatProperty(
name="P",
description="Grid precision",
subtype='PERCENTAGE',
default=100,
min=0,
max=100)
scn['GridP'] = 100
bpy.types.Scene.SelectString = StringProperty(
name="Input",
description="Enter an input file",
subtype='FILE_PATH')
scn['SelectString'] = "filename.py"
bpy.types.Scene.SaveString = StringProperty(
name="Output",
description="Enter an output file",
subtype='FILE_PATH')
scn['SaveString'] = "filename.py"
return
initSceneProperties(bpy.context.scene)
#
#
# class MapButtonsPanel(Panel):
# bl_category = 'Map'
# bl_space_type = 'VIEW_3D'
# bl_region_type = 'TOOLS'
#
# def draw(self, context):
# layout = self.layout
# scn = context.scene
class InputFile_Tools(Panel):
bl_label = "Input File"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
def draw(self, context):
layout = self.layout
scn = context.scene
layout.prop(scn, 'SelectString')
layout.operator("env.select")
layout.prop(scn, 'SaveString')
layout.operator("env.save")
class MapOrigin_Tools(Panel):
bl_label = "Map Origin"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
# COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
scn = context.scene
layout.label(text="Origin Position:")
row = layout.row(align=True)
row.alignment = 'EXPAND'
row.prop(scn, 'PosX')
row.prop(scn, 'PosY')
layout.operator("env.origin")
layout.operator("env.set")
class MapSize_Tools(Panel):
bl_label = "Map Bounds"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
def draw(self, context):
layout = self.layout
scn = context.scene
layout.label(text="X bounds:")
row = layout.row(align=True)
row.alignment = 'EXPAND'
row.prop(scn, 'MinX', text="Min")
row.prop(scn, 'MaxX', text="Max")
layout.label(text="Y bounds:")
row = layout.row(align=True)
row.alignment = 'EXPAND'
row.prop(scn, 'MinY', text="Min")
row.prop(scn, 'MaxY', text="Max")
layout.operator("env.size")
class GridSize_Tools (Panel):
bl_label = "Grid Size"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
def draw(self, context):
layout = self.layout
scn = context.scene
layout.prop(scn, 'GridP')
layout.operator("env.grid")
class Generate_Tools (Panel):
bl_label = "Generate Map"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
def draw(self, context):
layout = self.layout
scn = context.scene
layout.operator("env.generate")
class MapSelectButton(bpy.types.Operator):
bl_idname = "env.select"
bl_label = "Set input as configuration"
def execute(self, context):
scn = bpy.context.scene
view = bpy.context.space_data
        ic = open(scn.SelectString, "rb")  # read the stored graph from the selected input file
Sim.graph = pic.load(ic)
ic.close()
return{'FINISHED'}
class MapSaveButton(bpy.types.Operator):
bl_idname = "env.save"
bl_label = "Save configuration"
def execute(self, context):
scn = bpy.context.scene
view = bpy.context.space_data
        oc = open(scn.SaveString, "wb")  # write the current graph to the chosen output file
pic.dump(Sim.graph, oc)
oc.close()
return{'FINISHED'}
class MapOriginCursorButton(bpy.types.Operator):
bl_idname = "env.origin"
bl_label = "From cursor"
def execute(self, context):
scn = bpy.context.scene
view = bpy.context.space_data
Pcursor = view.cursor_location
bpy.context.scene.PosX = Pcursor[0]
bpy.context.scene.PosY = Pcursor[1]
scn.cursor_location = (scn.PosX, scn.PosY, 0)
return{'FINISHED'}
class MapOriginButton(bpy.types.Operator):
bl_idname = "env.set"
bl_label = "Set map origin"
def execute(self, context):
scn = bpy.context.scene
view = bpy.context.space_data
        Sim.OriginX = scn.PosX
        Sim.OriginY = scn.PosY
return{'FINISHED'}
class MapSizeButton(bpy.types.Operator):
bl_idname = "env.size"
bl_label = "Set map size"
def execute(self, context):
scn = bpy.context.scene
        Sim.MinX = scn.MinX
        Sim.MaxX = scn.MaxX
        Sim.MinY = scn.MinY
        Sim.MaxY = scn.MaxY
return{'FINISHED'}
class MapGridButton(bpy.types.Operator):
bl_idname = "env.grid"
bl_label = "Set Grid size"
def execute(self, context):
scn = bpy.context.scene
        coefficient = 5 - (scn.GridP / 20)
Sim.Grid = Sim.MinGrid * (10 ** coefficient)
return{'FINISHED'}
class MapGenerationButton(bpy.types.Operator):
bl_idname = "env.generate"
bl_label = "Generate"
def execute(self, context):
scn = bpy.context.scene
Sim.renew_graph()
return{'FINISHED'}
bpy.utils.register_module(__name__)
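# Illustrative sketch (added commentary, not part of the original add-on): once registered,
# the operators defined above can be invoked from the Blender console or other scripts, e.g.
#
#   bpy.ops.env.size()      # push the Min/Max bounds into the simulation
#   bpy.ops.env.grid()      # recompute the grid size from the precision slider
#   bpy.ops.env.generate()  # rebuild the simulation graph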
| gpl-3.0 | 137,303,051,667,729,950 | 25.625 | 103 | 0.593518 | false |
stefantkeller/VECSELsetup | exp/eval/light_light.py | 1 | 8684 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import errorvalues as ev # github.com/stefantkeller/errorvalues
from VECSELsetup.eval.varycolor import varycolor
from VECSELsetup.eval.gen_functions import load, extract, plotinstructions_write, plotinstructions_read, lut_from_calibfolder, lut_interp_from_calibfolder, thermal_resistance
def main():
# before running this script:
# run eval_spectrum.py to provide the .._eval.csv files required for the spectra
# run calibration.py (with appropriate calib measurements)
# and don't forget temperature_heatsink (this is not necessary for this script here, but it provides interesting insights for the measurement at hand)
logfile = '../24_LL_ev/20150211_sample21-1-d6/spot333um.csv'
calib_folder = '../24_LL_ev/20150204_calib_333um_s21-1-d6'
#------------------------------------
# calibration
emis_lut = lut_from_calibfolder(calib_folder,identifiers=['Laser'],ignore_error=False) # emission has constant value solely due to BS, no ND in front of detector etc.
pump_lut, refl_lut = lut_interp_from_calibfolder(calib_folder,identifiers=['Pump','Refl'])
#------------------------------------
# load measurement
current_set, current, pump, refl, laser, spectra, meantemp = extract(logfile, identifiers=['Current','Pump','Refl','Laser','Spectra', 'Temperature'])
Temperatures = sorted(current_set.keys()) # set temperatures (round numbers like 15.0 or 22.5 etc)
T_out = dict((T,meantemp[T].round(1)) for T in Temperatures) # real temperatures for display in plot, including +-uncertainty
#------------------------------------
# calculate using calibration
absorbed, reflected, emitted, pumped, dissipated = {}, {}, {}, {}, {}
for T in Temperatures:
reflected[T] = refl_lut(refl[T])
pumped[T] = pump_lut(pump[T])
absorbed[T] = pumped[T] - reflected[T]
emitted[T] = emis_lut(laser[T])
dissipated[T] = absorbed[T] - emitted[T]
#
#------------------------------------
# invoke instructions for plot and fit
# plotting the data can be tricky to reproduce, store the plot properties in a text file and read from there!
# (easy to repeat the plot at a later time)
# open the instruction file in a text editor, edit the instructions and run this module again; it will use the new instructions
instrfile = logfile[:-4]+'_instr.csv'
plotinstructions_write(instrfile,Temperatures,calib_folder)
#------------------------------------
# retrieve instructions
instr = plotinstructions_read(instrfile)
#
#------------------------------------
# translate instructions
str2lst = lambda s: map(float,s[1:-1].split(','))
textx = float(instr['textx']) # x coordinate for text; same for first two subplots (absorbed-emitted and absorbed-reflectivity)
fontsize = float(instr['fontsize'])
title = instr['title']
xlim = str2lst(instr['xlim']) # range of x-axis; same for first two subplots
ylim1 = str2lst(instr['ylim1']) # range of y-axis of first (aborbed-emitted) plot
ylim2 = str2lst(instr['ylim2']) # range of second y-axis (absorbed-reflectivity)
xlim3 = str2lst(instr['xlim3']) # third x-axis; (dissipated-wavelength)
ylim3 = str2lst(instr['ylim3']) # 3rd y-axis
plot_temps_for_3 = str2lst(instr['plot_temps_for_3']) # which ones to plot? you may have measured a heat sink temperature without lasing output, whose data will confuse the reader, so you don't plot it.
textx3 = float(instr['textx3']) # x-coordinate of text in 3rd plot
texty3 = str2lst(instr['texty3']) # 3rd y-coordinate
llow0 = {}
lhigh0 = {}
texty1 = {}
for T in Temperatures:
llow0[T] = sum(absorbed[T].v()<float(instr['llow0[{0}]'.format(T)])) # index indicating start of lasing activity
lhigh0[T] = sum(absorbed[T].v()<float(instr['lhigh0[{0}]'.format(T)])) # index corresponding to where linear segment stops
texty1[T] = float(instr['texty1[{0}]'.format(T)])
#
#
#------------------------------------
#------------------------------------
# plot
cols = varycolor(3*len(Temperatures))
plt.subplot(3,1,1)
cnt = 0 # color counter
q0,m0 = {},{} # for linreg
for T in Temperatures:
# linreg
q0[T],m0[T] = ev.linreg(absorbed[T].v()[llow0[T]:lhigh0[T]],
emitted[T].v()[llow0[T]:lhigh0[T]],
emitted[T].e()[llow0[T]:lhigh0[T]],
overwrite_zeroerrors=True)
emax,emaxi = ev.max(emitted[T],True)
amax = absorbed[T][emaxi]
print 'Max emission at ({}) degC at ({}) W absorbed power: ({}) W'.format(T_out[T],amax,emax)
# plot
plt.errorbar(absorbed[T].v(),emitted[T].v(),
xerr=absorbed[T].e(),yerr=emitted[T].e(),
c=cols[cnt],linestyle=' ')
plt.plot(absorbed[T].v(),m0[T].v()*absorbed[T].v()+q0[T].v(),c=cols[cnt+1])
plt.text(textx,texty1[T],
'${0}$$^\circ$C: ${1}$ %'.format(T_out[T],m0[T].round(3)*100),
color=cols[cnt],fontsize=fontsize)
cnt+=3
plt.title(title)
plt.xlabel('Absorbed power (W)')
    plt.ylabel('Emitted power (W)')
plt.xlim(xlim)
plt.ylim(ylim1)
plt.grid('on')
#plt.show()
#------------------------------------
plt.subplot(3,1,2)
cnt = 0 # reset color counter
q1,m1 = {},{}
for T in Temperatures:
relref = reflected[T]/pumped[T]*100
# plot
plt.errorbar(absorbed[T].v(),relref.v(),
xerr=absorbed[T].e(),yerr=relref.e(),
c=cols[cnt],linestyle=' ')
cnt+=3
plt.title(title)
plt.xlabel('Absorbed power (W)')
plt.ylabel('Reflectivity (%)')
plt.xlim(xlim)
plt.ylim(ylim2)
plt.grid('on')
#plt.show()
#------------------------------------
# plot dissipation and spectra
plt.subplot(3,1,3)
cnt = 0 # reset
q3,m3 = {},{}
for T in Temperatures:
if T in plot_temps_for_3:
# lambda_short
#plt.errorbar(dissipated[T].v(),spectra[T][0].v(),
# xerr=dissipated[T].e(),yerr=spectra[T][0].e(),
# c=cols[cnt],linestyle=' ')
# lambda_long
# lin reg for range that lases (>threshold, <roll over), hence instr from subplot 1
q3[T],m3[T] = ev.linreg(dissipated[T].v()[llow0[T]:lhigh0[T]],
spectra[T][1].v()[llow0[T]:lhigh0[T]],
spectra[T][1].e()[llow0[T]:lhigh0[T]],
overwrite_zeroerrors=True)
# show only a part, not to confuse reader
#plt.errorbar(dissipated[T].v()[llow0[T]:lhigh0[T]],spectra[T][1].v()[llow0[T]:lhigh0[T]],
# xerr=dissipated[T].e()[llow0[T]:lhigh0[T]],yerr=spectra[T][1].e()[llow0[T]:lhigh0[T]],
# c=cols[cnt],linestyle=' ')
# show the whole range
plt.errorbar(dissipated[T].v(),spectra[T][1].v(),
xerr=dissipated[T].e(),yerr=spectra[T][1].e(),
c=cols[cnt],linestyle=' ')
cnt += 3
plt.title(title)
plt.xlim(xlim3)
plt.ylim(ylim3)
plt.xlim()
plt.xlabel('Dissipated power (W)')
plt.ylabel('Wavelength (nm)')
plt.grid('on')
cnt = 0 # reset
wavelength = ev.errvallist([q3[T] for T in plot_temps_for_3]) # wavelength offsets
slopes = ev.errvallist([m3[T] for T in plot_temps_for_3]) # slopes
T_active = ev.errvallist([T_out[T] for T in plot_temps_for_3])
dldD, dldT, l0 = thermal_resistance(T_active,wavelength,slopes) #, R_th
R_th = dldD/dldT
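    # Note (added for clarity): the fit below assumes the linear model
    #   lambda = l0 + dldT * T_hs + dldD * D
    # (see the text label written onto the plot), so R_th = dldD / dldT carries
    # units of K/W: the heat-sink temperature rise that shifts the wavelength
    # as much as one watt of dissipated power does.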
for T in Temperatures:
if T in plot_temps_for_3:
plt.plot(dissipated[T].v(),l0.v() + dldT.v()*T_out[T].v() + dldD.v()*dissipated[T].v(),c=cols[cnt+1])
cnt+=3
plt.text(textx3,texty3[0],
'$\lambda=$'+'$({})$'.format(dldT.round(3))+'$T_{hs}+$'+'$({})$'.format(dldD.round(3))+'$D+$'+'${}$'.format(l0.round(3)),
color='k')
R_th = R_th.round(2)
therm_imp = 'Thermal impedance: $({0})$ K/W'.format(R_th)
plt.text(textx3,texty3[1],
therm_imp,color='k')
print therm_imp
for T in Temperatures:
print meantemp[T]
plt.show()
if __name__ == "__main__":
main()
| mit | 7,744,530,179,776,189,000 | 37.767857 | 206 | 0.551474 | false |
metamx/Diamond | src/collectors/rabbitmq/rabbitmq.py | 1 | 5518 | # coding=utf-8
"""
Collects data from RabbitMQ through the admin interface
#### Notes
** With added support for breaking down queue metrics by vhost, we have
attempted to keep results generated by existing configurations from
changing. This means that the old behaviour of clobbering queue metrics
when a single queue name exists in multiple vhosts still exists if the
configuration is not updated. If no vhosts block is defined it will also
keep the metric path as it was historically with no vhost name in it.
old path => systems.myServer.rabbitmq.queues.myQueue.*
new path => systems.myServer.rabbitmq.myVhost.queues.myQueue.*
** If a [vhosts] section exists but is empty, then no queues will be polled.
** To poll all vhosts and all queues, add the following.
** [vhosts]
** * = *
**
#### Dependencies
* pyrabbit
"""
import diamond.collector
try:
from numbers import Number
Number # workaround for pyflakes issue #13
import pyrabbit.api
except ImportError:
Number = None
class RabbitMQCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(RabbitMQCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname and port to collect from',
'user': 'Username',
'password': 'Password',
'queues': 'Queues to publish. Leave empty to publish all.',
'vhosts': 'A list of vhosts and queues for which we want to collect'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(RabbitMQCollector, self).get_default_config()
config.update({
'path': 'rabbitmq',
'host': 'localhost:55672',
'user': 'guest',
'password': 'guest'
})
return config
def collect(self):
if Number is None:
self.log.error('Unable to import either Number or pyrabbit.api')
return {}
try:
client = pyrabbit.api.Client(self.config['host'],
self.config['user'],
self.config['password'])
legacy = False
if 'vhosts' not in self.config:
legacy = True
if 'queues' in self.config:
self.config['vhosts'] = {"*": self.config['queues']}
else:
self.config['vhosts'] = {"*": ""}
# Legacy configurations, those that don't include the [vhosts]
# section require special care so that we do not break metric
# gathering for people that were using this collector before the
# update to support vhosts.
if not legacy:
vhost_names = client.get_vhost_names()
if "*" in self.config['vhosts']:
for vhost in vhost_names:
# Copy the glob queue list to each vhost not
# specifically defined in the configuration.
if vhost not in self.config['vhosts']:
self.config['vhosts'][vhost] = self.config[
'vhosts']['*']
del self.config['vhosts']["*"]
# Iterate all vhosts in our vhosts configuration. For legacy this
# is "*" to force a single run.
for vhost in self.config['vhosts']:
queues = self.config['vhosts'][vhost]
                # Allow the use of an asterisk to glob the queues, but replace
                # it with an empty string to match how the legacy config behaved.
if queues == "*":
queues = ""
allowed_queues = queues.split()
# When we fetch queues, we do not want to define a vhost if
# legacy.
if legacy:
vhost = None
for queue in client.get_queues(vhost):
# If queues are defined and it doesn't match, then skip.
if (queue['name'] not in allowed_queues
and len(allowed_queues) > 0):
continue
for key in queue:
prefix = "queues"
if not legacy:
prefix = "vhosts.%s.%s" % (vhost, "queues")
name = '{0}.{1}'.format(prefix, queue['name'])
self._publish_metrics(name, [], key, queue)
overview = client.get_overview()
for key in overview:
self._publish_metrics('', [], key, overview)
except Exception, e:
self.log.error('An error occurred collecting from RabbitMQ, %s', e)
return {}
def _publish_metrics(self, name, prev_keys, key, data):
"""Recursively publish keys"""
value = data[key]
keys = prev_keys + [key]
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(name, keys, new_key, value)
elif isinstance(value, Number):
joined_keys = '.'.join(keys)
if name:
publish_key = '{0}.{1}'.format(name, joined_keys)
else:
publish_key = joined_keys
self.publish(publish_key, value)
| mit | 3,951,124,300,809,999,000 | 35.065359 | 80 | 0.531171 | false |
jenisys/behave | examples/async_step/features/environment.py | 2 | 1530 | # -*- coding: UTF-8 -*-
from behave.tag_matcher import ActiveTagMatcher, setup_active_tag_values
from behave.api.runtime_constraint import require_min_python_version
from behave import python_feature
# -----------------------------------------------------------------------------
# REQUIRE: python >= 3.4
# -----------------------------------------------------------------------------
require_min_python_version("3.4")
# -----------------------------------------------------------------------------
# SUPPORT: Active-tags
# -----------------------------------------------------------------------------
# -- MATCHES ANY TAGS: @use.with_{category}={value}
# NOTE: active_tag_value_provider provides category values for active tags.
active_tag_value_provider = python_feature.ACTIVE_TAG_VALUE_PROVIDER.copy()
active_tag_matcher = ActiveTagMatcher(active_tag_value_provider)
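# -- EXAMPLE (illustrative only): with the schema above, a feature or scenario
# can be restricted with an active tag such as:
#
#   @use.with_python.version=3.5
#   Scenario: Needs a specific Python version
#
# where "python.version" is assumed to be one of the categories supplied by
# the value provider above; the exact category names come from behave's
# python_feature module.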
# -----------------------------------------------------------------------------
# HOOKS:
# -----------------------------------------------------------------------------
def before_all(ctx):
# -- SETUP ACTIVE-TAG MATCHER (with userdata):
setup_active_tag_values(active_tag_value_provider, ctx.config.userdata)
def before_feature(ctx, feature):
if active_tag_matcher.should_exclude_with(feature.tags):
feature.skip(reason=active_tag_matcher.exclude_reason)
def before_scenario(ctx, scenario):
if active_tag_matcher.should_exclude_with(scenario.effective_tags):
scenario.skip(reason=active_tag_matcher.exclude_reason)
| bsd-2-clause | -7,307,714,831,485,707,000 | 38.230769 | 79 | 0.515033 | false |
EDUlib/edx-platform | common/lib/xmodule/xmodule/modulestore/exceptions.py | 1 | 3789 | """
Exceptions thrown by KeyStore objects
"""
class ItemNotFoundError(Exception):
pass
class ItemWriteConflictError(Exception):
pass
class MultipleCourseBlocksFound(Exception):
"""
    Raise this exception when iterating over the course blocks returns multiple course blocks.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class MultipleLibraryBlocksFound(Exception):
"""
    Raise this exception when iterating over the library blocks returns multiple library blocks.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class InsufficientSpecificationError(Exception):
pass
class OverSpecificationError(Exception):
pass
class InvalidLocationError(Exception):
pass
class NoPathToItem(Exception):
pass
class ReferentialIntegrityError(Exception):
"""
An incorrect pointer to an object exists. For example, 2 parents point to the same child, an
xblock points to a nonexistent child (which probably raises ItemNotFoundError instead depending
on context).
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class DuplicateItemError(Exception):
"""
Attempted to create an item which already exists.
"""
def __init__(self, element_id, store=None, collection=None):
super().__init__()
self.element_id = element_id
self.store = store
self.collection = collection
def __str__(self, *args, **kwargs):
"""
Print info about what's duplicated
"""
return "{store}[{collection}] already has {element_id} ({exception})".format(
store=self.store,
collection=self.collection,
element_id=self.element_id,
exception=Exception.__str__(self, *args, **kwargs),
)
class VersionConflictError(Exception):
"""
The caller asked for either draft or published head and gave a version which conflicted with it.
"""
def __init__(self, requestedLocation, currentHeadVersionGuid):
super().__init__('Requested {}, but current head is {}'.format(
requestedLocation,
currentHeadVersionGuid
))
class DuplicateCourseError(Exception):
"""
An attempt to create a course whose id duplicates an existing course's
"""
def __init__(self, course_id, existing_entry):
"""
existing_entry will have the who, when, and other properties of the existing entry
"""
super().__init__(
f'Cannot create course {course_id}, which duplicates {existing_entry}'
)
self.course_id = course_id
self.existing_entry = existing_entry
class InvalidBranchSetting(Exception):
"""
Raised when the process' branch setting did not match the required setting for the attempted operation on a store.
"""
def __init__(self, expected_setting, actual_setting):
super().__init__(f"Invalid branch: expected {expected_setting} but got {actual_setting}") # lint-amnesty, pylint: disable=line-too-long, super-with-arguments
self.expected_setting = expected_setting
self.actual_setting = actual_setting
class InvalidProctoringProvider(Exception):
"""
Error with selected proctoring provider raised when the provided is unknown.
"""
def __init__(self, proctoring_provider, available_providers):
super().__init__()
self.proctoring_provider = proctoring_provider
self.available_providers = available_providers
def __str__(self, *args, **kwargs):
"""
Print details about error
"""
return f"The selected proctoring provider, {self.proctoring_provider}, is not a valid provider. " \
f"Please select from one of {self.available_providers}."
| agpl-3.0 | 3,610,771,503,910,648,300 | 29.071429 | 166 | 0.663236 | false |
stormi/tsunami | src/primaires/scripting/parser/nombre.py | 1 | 3620 | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Nombre, détaillée plus bas."""
from fractions import Fraction
from .expression import Expression
from .delimiteurs import DELIMITEURS
class Nombre(Expression):
"""Expression Nombre.
Notez qu'un nombre peut être :
un entier
un flottant
une fraction
Tous ces nombres sont de toute façon convertis en fraction.
"""
nom = "nombre"
def __init__(self):
"""Constructeur de l'expression."""
Expression.__init__(self)
self.nombre = None
def __repr__(self):
return "nombre({})".format(self.nombre)
def __str__(self):
return "|blc|" + str(self.nombre) + "|ff|"
@classmethod
def parsable(cls, chaine):
"""Retourne True si la chaîne est parsable, False sinon."""
chaine = chaine.lstrip()
fins = [chaine.index(delimiteur) for delimiteur in DELIMITEURS \
if delimiteur in chaine]
fin = fins and min(fins) or None
chaine = chaine[:fin]
try:
nombre = Fraction(chaine)
except ValueError:
nombre = None
return nombre is not None
@classmethod
def parser(cls, chaine):
"""Parse la chaîne.
Retourne l'objet créé et la partie non interprétée de la chaîne.
"""
objet = Nombre()
chaine = chaine.lstrip()
fins = [chaine.index(delimiteur) for delimiteur in DELIMITEURS \
if delimiteur in chaine]
if fins:
fin = min(fins)
else:
fin = None
chaine_interpreter = chaine[:fin]
objet.nombre = Fraction(chaine_interpreter)
return objet, chaine[len(chaine_interpreter):]
def get_valeur(self, evt):
"""Retourne le nombre sous la forme d'un objet Fraction."""
return self.nombre
@property
def code_python(self):
"""Retourne le code Python associé."""
return repr(self.nombre)
| bsd-3-clause | -257,016,107,874,898,900 | 33.692308 | 79 | 0.660754 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/relatedperson_tests.py | 1 | 7168 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import relatedperson
from .fhirdate import FHIRDate
class RelatedPersonTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("RelatedPerson", js["resourceType"])
return relatedperson.RelatedPerson(js)
def testRelatedPerson1(self):
inst = self.instantiate_from("relatedperson-example-f001-sarah.json")
self.assertIsNotNone(inst, "Must have instantiated a RelatedPerson instance")
self.implRelatedPerson1(inst)
js = inst.as_json()
self.assertEqual("RelatedPerson", js["resourceType"])
inst2 = relatedperson.RelatedPerson(js)
self.implRelatedPerson1(inst2)
def implRelatedPerson1(self, inst):
self.assertEqual(inst.gender, "female")
self.assertEqual(inst.id, "f001")
self.assertEqual(inst.identifier[0].system, "urn:oid:2.16.840.1.113883.2.4.6.3")
self.assertEqual(inst.identifier[0].type.text, "BSN")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.name[0].family, "Abels")
self.assertEqual(inst.name[0].given[0], "Sarah")
self.assertEqual(inst.name[0].use, "usual")
self.assertEqual(inst.relationship.coding[0].code, "SIGOTHR")
self.assertEqual(inst.relationship.coding[0].system, "http://hl7.org/fhir/v3/RoleCode")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "mobile")
self.assertEqual(inst.telecom[0].value, "0690383372")
self.assertEqual(inst.telecom[1].system, "email")
self.assertEqual(inst.telecom[1].use, "home")
self.assertEqual(inst.telecom[1].value, "[email protected]")
self.assertEqual(inst.text.status, "generated")
def testRelatedPerson2(self):
inst = self.instantiate_from("relatedperson-example-f002-ariadne.json")
self.assertIsNotNone(inst, "Must have instantiated a RelatedPerson instance")
self.implRelatedPerson2(inst)
js = inst.as_json()
self.assertEqual("RelatedPerson", js["resourceType"])
inst2 = relatedperson.RelatedPerson(js)
self.implRelatedPerson2(inst2)
def implRelatedPerson2(self, inst):
self.assertEqual(inst.birthDate.date, FHIRDate("1963").date)
self.assertEqual(inst.birthDate.as_json(), "1963")
self.assertEqual(inst.gender, "female")
self.assertEqual(inst.id, "f002")
self.assertEqual(inst.name[0].text, "Ariadne Bor-Jansma")
self.assertEqual(inst.name[0].use, "usual")
self.assertEqual(inst.period.start.date, FHIRDate("1975").date)
self.assertEqual(inst.period.start.as_json(), "1975")
self.assertEqual(inst.photo[0].contentType, "image/jpeg")
self.assertEqual(inst.relationship.coding[0].code, "SIGOTHR")
self.assertEqual(inst.relationship.coding[0].system, "http://hl7.org/fhir/v3/RoleCode")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "home")
self.assertEqual(inst.telecom[0].value, "+31201234567")
self.assertEqual(inst.text.status, "generated")
def testRelatedPerson3(self):
inst = self.instantiate_from("relatedperson-example-peter.json")
self.assertIsNotNone(inst, "Must have instantiated a RelatedPerson instance")
self.implRelatedPerson3(inst)
js = inst.as_json()
self.assertEqual("RelatedPerson", js["resourceType"])
inst2 = relatedperson.RelatedPerson(js)
self.implRelatedPerson3(inst2)
def implRelatedPerson3(self, inst):
self.assertEqual(inst.address[0].city, "PleasantVille")
self.assertEqual(inst.address[0].line[0], "534 Erewhon St")
self.assertEqual(inst.address[0].postalCode, "3999")
self.assertEqual(inst.address[0].state, "Vic")
self.assertEqual(inst.address[0].use, "home")
self.assertEqual(inst.gender, "male")
self.assertEqual(inst.id, "peter")
self.assertEqual(inst.name[0].family, "Chalmers")
self.assertEqual(inst.name[0].given[0], "Peter")
self.assertEqual(inst.name[0].given[1], "James")
self.assertEqual(inst.name[0].use, "official")
self.assertEqual(inst.period.start.date, FHIRDate("2012-03-11").date)
self.assertEqual(inst.period.start.as_json(), "2012-03-11")
self.assertEqual(inst.photo[0].contentType, "image/jpeg")
self.assertEqual(inst.photo[0].url, "Binary/f012")
self.assertEqual(inst.relationship.coding[0].code, "C")
self.assertEqual(inst.relationship.coding[0].system, "http://hl7.org/fhir/v2/0131")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "work")
self.assertEqual(inst.telecom[0].value, "(03) 5555 6473")
self.assertEqual(inst.text.status, "generated")
def testRelatedPerson4(self):
inst = self.instantiate_from("relatedperson-example.json")
self.assertIsNotNone(inst, "Must have instantiated a RelatedPerson instance")
self.implRelatedPerson4(inst)
js = inst.as_json()
self.assertEqual("RelatedPerson", js["resourceType"])
inst2 = relatedperson.RelatedPerson(js)
self.implRelatedPerson4(inst2)
def implRelatedPerson4(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.address[0].city, "Paris")
self.assertEqual(inst.address[0].country, "FRA")
self.assertEqual(inst.address[0].line[0], "43, Place du Marché Sainte Catherine")
self.assertEqual(inst.address[0].postalCode, "75004")
self.assertEqual(inst.gender, "female")
self.assertEqual(inst.id, "benedicte")
self.assertEqual(inst.identifier[0].system, "urn:oid:1.2.250.1.61")
self.assertEqual(inst.identifier[0].type.text, "INSEE")
self.assertEqual(inst.identifier[0].use, "usual")
self.assertEqual(inst.identifier[0].value, "272117510400399")
self.assertEqual(inst.name[0].family, "du Marché")
self.assertEqual(inst.name[0].given[0], "Bénédicte")
self.assertEqual(inst.photo[0].contentType, "image/jpeg")
self.assertEqual(inst.photo[0].url, "Binary/f016")
self.assertEqual(inst.relationship.coding[0].code, "N")
self.assertEqual(inst.relationship.coding[0].system, "http://hl7.org/fhir/v2/0131")
self.assertEqual(inst.relationship.coding[1].code, "WIFE")
self.assertEqual(inst.relationship.coding[1].system, "http://hl7.org/fhir/v3/RoleCode")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].value, "+33 (237) 998327")
self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | 8,045,643,496,570,569,000 | 47.734694 | 95 | 0.664294 | false |
peterayeni/django-smsgateway | smsgateway/south_migrations/0006_auto__add_field_queuedsms_using.py | 1 | 2515 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'QueuedSMS.using'
db.add_column('smsgateway_queuedsms', 'using', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'QueuedSMS.using'
db.delete_column('smsgateway_queuedsms', 'using')
models = {
'smsgateway.queuedsms': {
'Meta': {'ordering': "('priority', 'created')", 'object_name': 'QueuedSMS'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'default': "'2'", 'max_length': '1'}),
'reliable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'signature': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'to': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'using': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'smsgateway.sms': {
'Meta': {'ordering': "('sent',)", 'object_name': 'SMS'},
'backend': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '32', 'db_index': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'direction': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'gateway': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'gateway_ref': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'operator': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'sent': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'to': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['smsgateway']
| bsd-3-clause | 799,461,428,937,790,200 | 51.395833 | 160 | 0.561829 | false |
Zokol/The-Great-Dalmuti | dalmut.py | 1 | 8497 | import random
"""
THE GREAT DALMUTI
Heikki "Zokol" Juva 2015 - [email protected]
"""
## Exception raised when all players have skipped the round
class SkipException(Exception):
pass
class RestartRound(Exception):
pass
class Card:
def __init__(self, value):
self.value = value
def __repr__(self):
return "Card: " + str(self.value)
def __str__(self):
return str(self.value)
class Player:
def __init__(self, name):
self.name = name
self.hand = []
self.position = "TBD"
self.stats = {"Dalmut": [], "Trader": [], "Slave": []}
def __str__(self):
card_list = []
for card in self.hand:
card_list.append(card.__str__())
return str(self.name) + " " + self.position + " : " + ", ".join(card_list)
def sort_hand(self):
self.hand.sort(key=lambda card: card.value, reverse=True)
def receive_card(self, card):
self.hand.append(card)
self.sort_hand()
def take_card(self, id):
return self.hand.pop(id)
def take_highest_card(self):
self.sort_hand()
return self.take_card(0)
def take_lowest_card(self):
self.sort_hand()
return self.take_card(len(self.hand)-1)
def count_cards(self, order):
return len([card for card in self.hand if card.value == order])
# Return those cards that player has many and those that are as low number as possible
def take_best_cards(self, limit, count):
self.sort_hand()
best = [-1, -1] # First is the card order, second is the 'point-value'
if limit > self.hand[0].value:
highest_card = self.hand[0].value + 1
else:
highest_card = limit
        #print(highest_card)
        #print(self.count_cards(highest_card))
for i in reversed(range(highest_card)):
if count == -1:
points = self.count_cards(i) * i
if best[1] < points:
best[0] = i
best[1] = points
elif self.count_cards(i) == count:
best[0] = i
break
if best[0] == -1: raise SkipException # No cards -> skip
picked_cards = [card for card in self.hand if card.value == best[0]]
if count != -1: picked_cards = picked_cards[:count]
self.hand = [card for card in self.hand if card not in picked_cards]
self.sort_hand()
return picked_cards
def play_hand(self, table):
if len(table) > 0:
count = len(table[-1])
limit = table[-1][0].value
else:
count = -1
limit = 99
return self.take_best_cards(limit, count)
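        # (Clarifying note: `table` is a stack of played sets; table[-1] is the
        # set currently on top, so a follow-up play must match its card count
        # and use a strictly lower card value -- see take_best_cards above.)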
def empty_hand(self):
self.hand = []
class Stack:
def __init__(self, largest_number):
self.stack = []
for value in range(1, largest_number + 1):
for i in range(value):
self.stack.append(Card(value))
def __str__(self):
card_list = []
for card in self.stack:
card_list.append(card.__str__())
return ", ".join(card_list)
def __len__(self):
return len(self.stack)
def shuffle(self):
random.shuffle(self.stack)
def lift_top_card(self):
return self.stack.pop(0)
def add(self, card):
self.stack.append(card)
class Game:
def __init__(self, number_of_players, number_of_games, number_of_rounds):
self.table = []
self.players = []
for p in range(number_of_players):
self.players.append(Player("Player " + str(p)))
self.reset_stack()
# Determine initial position for players
# Each player lifts one card from stack
# Lowest card holder is the Great Dalmut
# Highest card holder is the slave
# Everyone in between are traders
self.initial_pos()
print("Intial position for players determined")
self.print_players()
# Main loop
#starting_player = self.players[0]
for i in range(number_of_games):
self.reset_stack()
self.play_game(self.players, number_of_rounds)
#self.order_players(starting_player)
print("Game over")
print("RESULTS:")
self.print_stats()
def reset_stack(self):
self.empty_players_hands()
# Create stack
self.stack = Stack(12) # Create stack with the highest number being 12
print("Number of cards:", len(self.stack))
print("Stack")
print(self.stack)
print("-----------------------")
print("")
# Shuffle stack
print("Stack shuffled")
self.stack.shuffle()
print(self.stack)
print("-----------------------")
print("")
def play_game(self, playing_order, number_of_rounds):
print("-----------------------")
print("")
print("Cards dealt")
self.deal_cards()
self.print_players()
print("-----------------------")
print("")
round_i = 0
while round_i < number_of_rounds:
round_i += 1
print("Play round", round_i)
#print(playing_order)
playing_order = self.play_round(playing_order)
#print(playing_order)
playing_order[0].stats["Dalmut"].append(round_i)
for player in playing_order[1: -1]:
player.stats["Trader"].append(round_i)
playing_order[-1].stats["Slave"].append(round_i)
print("Players card count:", self.count_player_cards(playing_order))
self.empty_table()
self.deal_cards()
print("Players card count:", self.count_player_cards(playing_order))
#if not new_order[0].hand: return new_order #XXX ????
self.table = []
self.print_players()
self.print_stats()
def print_players(self):
for p in self.players:
print(p)
def print_stats(self):
for p in self.players:
print (p.name, "Dalmut:", len(p.stats["Dalmut"]), "Trader:", len(p.stats["Trader"]), "Slave:", len(p.stats["Slave"]))
def print_table(self):
top_cards = self.table[-1]
print(str(len(top_cards)), "x", top_cards[0], "on the table")
def initial_pos(self):
for player in self.players:
if len(self.stack) > 0: player.receive_card(self.stack.lift_top_card())
else: print("Too small stack to deal, not enough cards for everyone")
self.players.sort(key = lambda player: player.hand[0].value)
for player in self.players:
player.position = "Trader"
player.stats["Trader"].append(0)
self.players[0].position = "Dalmut"
self.players[-1].position = "Slave"
self.players[0].stats["Dalmut"].append(0)
self.players[-1].stats["Slave"].append(0)
def deal_cards(self):
print("Number of cards in stack:", len(self.stack))
card_id = 0
while card_id < len(self.stack):
for player in self.players:
player.receive_card(self.stack.lift_top_card())
card_id += 1
def count_player_cards(self, players):
total = 0
for player in players:
total += len(player.hand)
return total
def empty_players_hands(self):
for player in self.players:
player.empty_hand()
def empty_table(self):
card_count = 0
for cards in self.table:
for card in cards:
card_count += len(cards)
self.stack.add(cards.pop(cards.index(card)))
print("Number of cards on table", card_count)
self.table = []
def play_round(self, players):
#starting_index = self.players.index(starting_player)
#transposed_players = self.players[starting_index:] + self.players[:starting_index]
new_order = []
skip_counter = 0
new_dalmut = False
while True:
try:
for player in players:
if skip_counter == len(players) - 1:
#return player
                        ## Every other player skipped; transpose the player list so the current player starts the next round
starting_index = self.players.index(player)
transposed_players = self.players[starting_index:] + self.players[:starting_index]
players = transposed_players
skip_counter = 0
self.empty_table()
raise RestartRound
try:
#print(player)
## If someone runs out of cards, here we determine who gets which position for the next game
"""
print("Hand empty:", not player.hand)
print("Player finished:", player in new_order)
print("Is new dalmut found:", new_dalmut)
"""
if player in new_order:
pass
elif not player.hand and not new_dalmut:
#print("New Dalmut found!!")
new_order.append(player) # First player runs out of cards
new_dalmut = True
elif not player.hand and new_dalmut and len(players) - 1 > len(new_order):
                            new_order.append(player) # a player who is neither first nor last runs out of cards
elif not player.hand and len(players) - 1 == len(new_order): # Last player runs out of cards
new_order.append(player)
#print("NEW ORDER:", new_order)
return new_order
else:
self.table.append(player.play_hand(self.table)) ## Let the next playr to play the hand and place it on the table
self.print_table()
#skip_counter = 0
except SkipException:
print("Skip")
skip_counter += 1
except RestartRound:
print("Restarting round with new order")
pass
if __name__ == '__main__':
game = Game(10, 3, 900) | mit | -4,180,557,527,489,499,000 | 26.237179 | 120 | 0.645993 | false |
hpd/MitsubaForMaya | plug-ins/mitsuba/volumes/volume.py | 1 | 2498 | import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
kPluginNodeName = "MitsubaVolume"
kPluginNodeClassify = "shader/volume"
kPluginNodeId = OpenMaya.MTypeId(0x87033)
class volume(OpenMayaMPx.MPxNode):
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
mSourcefile = OpenMaya.MObject()
mGridDims = OpenMaya.MObject()
mOutColor = OpenMaya.MObject()
def compute(self, plug, block):
if plug == volume.mOutColor:
resultColor = OpenMaya.MFloatVector(0.0,0.0,0.0)
outColorHandle = block.outputValue( volume.mOutColor )
outColorHandle.setMFloatVector(resultColor)
outColorHandle.setClean()
else:
return OpenMaya.kUnknownParameter
def nodeCreator():
return volume()
def nodeInitializer():
nAttr = OpenMaya.MFnNumericAttribute()
try:
volume.mSourcefile = nAttr.createColor("sourceFile", "sf")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
#nAttr.setDefault(50,50,50)
volume.mGridDims = nAttr.create("gridDimensions", "gd", OpenMaya.MFnNumericData.k3Float)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
volume.mOutColor = nAttr.createColor("outColor", "oc")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
except:
sys.stderr.write("Failed to create attributes\n")
raise
try:
volume.addAttribute(volume.mSourcefile)
volume.addAttribute(volume.mGridDims)
volume.addAttribute(volume.mOutColor)
except:
sys.stderr.write("Failed to add attributes\n")
raise
# initialize the script plug-in
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.registerNode( kPluginNodeName, kPluginNodeId, nodeCreator,
nodeInitializer, OpenMayaMPx.MPxNode.kDependNode, kPluginNodeClassify )
except:
sys.stderr.write( "Failed to register node: %s" % kPluginNodeName )
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode( kPluginNodeId )
except:
sys.stderr.write( "Failed to deregister node: %s" % kPluginNodeName )
raise
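# Illustrative usage from a Maya Python session, assuming this file is on the
# plug-in path and saved as "volume.py" (both assumptions):
#   import maya.cmds as cmds
#   cmds.loadPlugin('volume.py')
#   cmds.createNode('MitsubaVolume')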
| mit | -3,909,570,042,202,730,500 | 29.463415 | 96 | 0.658927 | false |
awwong1/CMPUT404LAB9_W2016 | iguana/iguana/urls.py | 1 | 1469 | """iguana URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from rest_framework import routers
from rest_framework.authtoken import views as authtoken_views
from quickstart import views as quickstart_views
from client import views as client_views
router = routers.DefaultRouter()
router.register(r'users', quickstart_views.UserViewSet)
router.register(r'groups', quickstart_views.GroupViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(router.urls)),
url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api-token-auth/', authtoken_views.obtain_auth_token),
url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
url(r'^', client_views.example_view, name="index")
]
| mit | 7,628,134,095,978,440,000 | 38.702703 | 83 | 0.724983 | false |
dborzov/practicin | 67-binary-heap/solution.py | 1 | 1403 | class Heap:
def __init__(self):
self.bh = []
def pop(self):
if len(self.bh)==0: raise StandardError('No more elements in the heap')
if len(self.bh)==1:
return self.bh.pop()
return_value, self.bh[0] = self.bh[0], self.bh[-1]
self.bh = self.bh[:len(self.bh)-1]
cur = 0
while True:
left, right = cur*2+1, cur*2+2
get_value = lambda x:self.bh[x] if x<len(self.bh) else None
top_element = max([left, right], key=get_value)
print "Stack:", self.bh
print "Left:{}, right:{}, top element:{}".format(left, right, top_element)
if (get_value(top_element) is None) or (self.bh[top_element] < self.bh[cur]):
return return_value
self.bh[cur], self.bh[top_element] = self.bh[top_element], self.bh[cur]
cur = top_element
def bubble_up(self,cur):
while cur!=0:
parent=(cur-1)//2
if self.bh[parent]>self.bh[cur]:
return
self.bh[parent], self.bh[cur] = self.bh[cur], self.bh[parent]
cur=parent
def add(self, new_value):
self.bh.append(new_value)
self.bubble_up(len(self.bh)-1)
print 'We added {}, and now stack is {}'.format(new_value, self.bh)
new_one = Heap()
new_one.add(3)
new_one.add(2)
new_one.add(12)
new_one.add(9)
print 'Pop: ', new_one.pop()
print 'Pop: ', new_one.pop()
print 'Pop: ', new_one.pop()
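# Sanity check (traced by hand): with the inserts above (3, 2, 12, 9) this
# max-heap pops 12, then 9, then 3.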
| mit | -2,426,201,212,558,269,400 | 30.177778 | 85 | 0.570207 | false |
NeCTAR-RC/cinder | cinder/tests/test_netapp_nfs.py | 1 | 47792 |
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
return configuration
class FakeVolume(object):
def __init__(self, size=0):
self.size = size
self.id = hash(self)
self.name = None
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponse(object):
def __init__(self, status):
"""Initialize FakeResponse.
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
def setUp(self):
super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
drv.local_path(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(loc, expected_result)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
setattr(drv, 'ssc_enabled', False)
mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
netapp_nfs.NetAppDirectNfsDriver._check_flags()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_get_if_info_by_ip')
mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
mox.StubOutWithMock(drv, '_clone_file')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
drv._get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
drv._clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._post_prov_deprov_in_ssc(IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address>127.0.0.1</address>
<administrative-status>up</administrative-status>
<current-node>fas3170rre-cmode-01</current-node>
<current-port>e1b-1165</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group/>
<failover-policy>disabled</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>fas3170rre-cmode-01</home-node>
<home-port>e1b-1165</home-port>
<interface-name>nfs_data1</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<netmask>255.255.255.0</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>c10.63.165.0/24</routing-group-name>
<use-failover-group>disabled</use-failover-group>
<vserver>openstack</vserver>
</net-interface-info></attributes-list>"""
response_el = etree.XML(res)
return api.NaElement(response_el).get_children()
def test_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
share = 'ip:/share'
drv._clone_volume(volume_name, clone_name, volume_id, share)
mox.VerifyAll()
def test_register_img_in_cache_noshare(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_register_img_in_cache_with_share(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
if not result:
pass
else:
self.fail('Return result is unexpected')
def test_find_image_in_cache_shares(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(os.path, 'exists')
drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
os.path.exists('/mnt/img-cache-id').AndReturn(True)
mox.ReplayAll()
result = drv._find_image_in_cache('id')
(share, file_name) = result[0]
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if (share == 'testshare' and file_name == 'img-cache-id'):
pass
else:
LOG.warn(_("Share %(share)s and file name %(file_name)s")
% {'share': share, 'file_name': file_name})
self.fail('Return result is unexpected')
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == 0:
pass
else:
self.fail('No files expected but got return values.')
def test_find_old_cache_files_exists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', '720')
files = '/mnt/img-id1\n/mnt/img-id2\n'
r_files = ['img-id1', 'img-id2']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == len(r_files):
for f in res:
r_files.remove(f)
else:
self.fail('Returned files not same as expected.')
def test_delete_files_till_bytes_free_success(self):
drv = self._driver
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._delete_file('/mnt/img-cache-2').AndReturn(True)
drv._delete_file('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clone_image_fromcache(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.ReplayAll()
drv.clone_image(volume, ('image_location', None), 'image_id', {})
mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
def test_clone_image_cloneableshare_nospace(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
mox.ReplayAll()
        (prop, cloned) = drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
if not cloned and not prop['provider_location']:
pass
else:
self.fail('Expected not cloned, got cloned.')
def test_clone_image_cloneableshare_raw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('raw'))
drv._clone_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
        drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_cloneableshare_notraw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
        drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_file_not_discovered(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
        vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_clone_image_resizefails(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file(
IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
        vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
'com.netapp.com://share/im\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com:8080//share/img',
'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
def test_is_cloneable_share_goodformat1(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat2(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat3(self):
drv = self._driver
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat4(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat5(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_check_share_in_use_no_conn(self):
drv = self._driver
share = drv._check_share_in_use(None, '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
[{'metadata':
{'share_location': 'nfs://host/path',
'mount_point': '/opt/stack/data/glance',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id'}])
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {}
mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
fake_qos_policy = 'qos_policy_1'
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
volume_info = self._driver.create_volume(FakeVolume(1))
self.assertEqual(volume_info.get('provider_location'),
fake_share)
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_volume = FakeVolume(1)
fake_share = 'localhost:myshare'
fake_qos_policy = 'qos_policy_1'
mock_volume_extra_specs.return_value = extra_specs
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
with mock.patch.object(drv,
'_set_qos_policy_group_on_volume'
) as mock_set_qos:
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual(volume_info.get('provider_location'),
'localhost:myshare')
mock_set_qos.assert_called_once_with(fake_volume,
fake_share,
fake_qos_policy)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value="")
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
        # Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file.call_count == 2
        assert drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
"""Test direct NetApp C Mode driver."""
def _custom_setup(self):
self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
configuration=create_configuration())
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_check_for_setup_error_version(self):
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
# check exception raises when version not found
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
drv._client.set_api_version(1, 8)
# check exception raises when not supported version
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
drv._client.set_api_version(1, 9)
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_export_ip_path')
mox.StubOutWithMock(drv, '_get_actual_path_for_export')
mox.StubOutWithMock(drv, '_start_clone')
mox.StubOutWithMock(drv, '_wait_for_clone_finish')
if status == 'fail':
mox.StubOutWithMock(drv, '_clear_clone')
drv._get_export_ip_path(
IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
if status == 'fail':
drv._wait_for_clone_finish('1', '2').AndRaise(
api.NaApiError('error', 'error'))
drv._clear_clone('1')
else:
drv._wait_for_clone_finish('1', '2')
return mox
def test_clone_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
try:
drv._clone_volume(volume_name, clone_name, volume_id)
except Exception as e:
if isinstance(e, api.NaApiError):
pass
else:
raise
mox.VerifyAll()
| apache-2.0 | 4,882,235,889,434,253,000 | 38.926483 | 79 | 0.579951 | false |
sajuptpm/neutron-ipam | neutron/tests/unit/bigswitch/test_capabilities.py | 1 | 2608 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author Kevin Benton
from contextlib import nested
import mock
from neutron.tests.unit.bigswitch import test_router_db
PLUGIN = 'neutron.plugins.bigswitch.plugin'
SERVERMANAGER = PLUGIN + '.servermanager'
SERVERPOOL = SERVERMANAGER + '.ServerPool'
SERVERRESTCALL = SERVERMANAGER + '.ServerProxy.rest_call'
class CapabilitiesTests(test_router_db.RouterDBTestCase):
def test_floating_ip_capability(self):
with nested(
mock.patch(SERVERRESTCALL,
return_value=(200, None, '["floatingip"]', None)),
mock.patch(SERVERPOOL + '.rest_create_floatingip',
return_value=(200, None, None, None)),
mock.patch(SERVERPOOL + '.rest_delete_floatingip',
return_value=(200, None, None, None))
) as (mock_rest, mock_create, mock_delete):
with self.floatingip_with_assoc() as fip:
pass
mock_create.assert_has_calls(
[mock.call(fip['floatingip']['tenant_id'], fip['floatingip'])]
)
mock_delete.assert_has_calls(
[mock.call(fip['floatingip']['tenant_id'],
fip['floatingip']['id'])]
)
def test_floating_ip_capability_neg(self):
with nested(
mock.patch(SERVERRESTCALL,
return_value=(200, None, '[""]', None)),
mock.patch(SERVERPOOL + '.rest_update_network',
return_value=(200, None, None, None))
) as (mock_rest, mock_netupdate):
with self.floatingip_with_assoc() as fip:
pass
updates = [call[0][2]['floatingips']
for call in mock_netupdate.call_args_list]
all_floats = [f['floating_ip_address']
for floats in updates for f in floats]
self.assertIn(fip['floatingip']['floating_ip_address'], all_floats)
| apache-2.0 | -2,216,588,186,026,200,800 | 39.123077 | 79 | 0.61273 | false |
fastavro/fastavro | fastavro/_write_py.py | 1 | 22640 | # cython: auto_cpdef=True
"""Python code for writing AVRO files"""
# This code is a modified version of the code at
# http://svn.apache.org/viewvc/avro/trunk/lang/py/src/avro/ which is under
# Apache 2.0 license (http://www.apache.org/licenses/LICENSE-2.0)
import json
from io import BytesIO
from os import urandom, SEEK_SET
import bz2
import lzma
import zlib
from .const import NAMED_TYPES
from .io.binary_encoder import BinaryEncoder
from .io.json_encoder import AvroJSONEncoder
from .validation import _validate
from .read import HEADER_SCHEMA, SYNC_SIZE, MAGIC, reader
from .logical_writers import LOGICAL_WRITERS
from .schema import extract_record_type, extract_logical_type, parse_schema
from ._write_common import _is_appendable
def write_null(encoder, datum, schema, named_schemas, fname):
"""null is written as zero bytes"""
encoder.write_null()
def write_boolean(encoder, datum, schema, named_schemas, fname):
"""A boolean is written as a single byte whose value is either 0 (false) or
1 (true)."""
encoder.write_boolean(datum)
def write_int(encoder, datum, schema, named_schemas, fname):
"""int and long values are written using variable-length, zig-zag coding."""
encoder.write_int(datum)
def write_long(encoder, datum, schema, named_schemas, fname):
"""int and long values are written using variable-length, zig-zag coding."""
encoder.write_long(datum)
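# Editor's note (illustrative, not part of the upstream module): zig-zag coding
# maps signed integers onto unsigned ones so that small magnitudes encode into
# few bytes: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
# A minimal sketch of the mapping, assuming 64-bit longs:
#
#     def zigzag_encode(n):
#         return (n << 1) ^ (n >> 63)
#
#     def zigzag_decode(u):
#         return (u >> 1) ^ -(u & 1)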
def write_float(encoder, datum, schema, named_schemas, fname):
"""A float is written as 4 bytes. The float is converted into a 32-bit
integer using a method equivalent to Java's floatToIntBits and then encoded
in little-endian format."""
encoder.write_float(datum)
def write_double(encoder, datum, schema, named_schemas, fname):
"""A double is written as 8 bytes. The double is converted into a 64-bit
integer using a method equivalent to Java's doubleToLongBits and then
encoded in little-endian format."""
encoder.write_double(datum)
def write_bytes(encoder, datum, schema, named_schemas, fname):
"""Bytes are encoded as a long followed by that many bytes of data."""
encoder.write_bytes(datum)
def write_utf8(encoder, datum, schema, named_schemas, fname):
"""A string is encoded as a long followed by that many bytes of UTF-8
encoded character data."""
encoder.write_utf8(datum)
def write_crc32(encoder, datum):
"""A 4-byte, big-endian CRC32 checksum"""
encoder.write_crc32(datum)
def write_fixed(encoder, datum, schema, named_schemas, fname):
"""Fixed instances are encoded using the number of bytes declared in the
schema."""
if len(datum) != schema["size"]:
raise ValueError(
f"data of length {len(datum)} does not match schema size: {schema}"
)
encoder.write_fixed(datum)
def write_enum(encoder, datum, schema, named_schemas, fname):
"""An enum is encoded by a int, representing the zero-based position of
the symbol in the schema."""
index = schema["symbols"].index(datum)
encoder.write_enum(index)
def write_array(encoder, datum, schema, named_schemas, fname):
"""Arrays are encoded as a series of blocks.
Each block consists of a long count value, followed by that many array
items. A block with count zero indicates the end of the array. Each item
is encoded per the array's item schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written."""
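    # Illustrative example (editor's addition): with an item schema of "long",
    # the array [1, 2] is written by this implementation as one block: the item
    # count 2, the encodings of 1 and 2, then a zero count (written by
    # write_array_end) that terminates the array.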
encoder.write_array_start()
if len(datum) > 0:
encoder.write_item_count(len(datum))
dtype = schema["items"]
for item in datum:
write_data(encoder, item, dtype, named_schemas, fname)
encoder.end_item()
encoder.write_array_end()
def write_map(encoder, datum, schema, named_schemas, fname):
"""Maps are encoded as a series of blocks.
Each block consists of a long count value, followed by that many key/value
pairs. A block with count zero indicates the end of the map. Each item is
encoded per the map's value schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written."""
encoder.write_map_start()
if len(datum) > 0:
encoder.write_item_count(len(datum))
vtype = schema["values"]
for key, val in datum.items():
encoder.write_utf8(key)
write_data(encoder, val, vtype, named_schemas, fname)
encoder.write_map_end()
def write_union(encoder, datum, schema, named_schemas, fname):
"""A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value. The value
is then encoded per the indicated schema within the union."""
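    # Worked example (editor's addition): for the union schema ["null", "long"]
    # and a datum of 7, the "long" branch at index 1 is chosen, so the long 1
    # (the branch index) is written first, followed by the encoding of 7.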
best_match_index = -1
if isinstance(datum, tuple):
(name, datum) = datum
for index, candidate in enumerate(schema):
extracted_type = extract_record_type(candidate)
if extracted_type in NAMED_TYPES:
schema_name = candidate["name"]
else:
schema_name = extracted_type
if name == schema_name:
best_match_index = index
break
if best_match_index == -1:
field = f"on field {fname}" if fname else ""
msg = (
f"provided union type name {name} not found in schema "
+ f"{schema} {field}"
)
raise ValueError(msg)
index = best_match_index
else:
pytype = type(datum)
most_fields = -1
# All of Python's floating point values are doubles, so to
# avoid loss of precision, we should always prefer 'double'
# if we are forced to choose between float and double.
#
# If 'double' comes before 'float' in the union, then we'll immediately
# choose it, and don't need to worry. But if 'float' comes before
# 'double', we don't want to pick it.
#
# So, if we ever see 'float', we skim through the rest of the options,
# just to see if 'double' is a possibility, because we'd prefer it.
could_be_float = False
for index, candidate in enumerate(schema):
if could_be_float:
if extract_record_type(candidate) == "double":
best_match_index = index
break
else:
# Nothing except "double" is even worth considering.
continue
if _validate(datum, candidate, named_schemas, raise_errors=False):
record_type = extract_record_type(candidate)
if record_type == "record":
logical_type = extract_logical_type(candidate)
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, candidate)
candidate_fields = set(f["name"] for f in candidate["fields"])
datum_fields = set(datum)
fields = len(candidate_fields.intersection(datum_fields))
if fields > most_fields:
best_match_index = index
most_fields = fields
elif record_type == "float":
best_match_index = index
# Continue in the loop, because it's possible that there's
# another candidate which has record type 'double'
could_be_float = True
else:
best_match_index = index
break
if best_match_index == -1:
field = f"on field {fname}" if fname else ""
raise ValueError(
f"{repr(datum)} (type {pytype}) do not match {schema} {field}"
)
index = best_match_index
# write data
# TODO: There should be a way to give just the index
encoder.write_index(index, schema[index])
write_data(encoder, datum, schema[index], named_schemas, fname)
def write_record(encoder, datum, schema, named_schemas, fname):
"""A record is encoded by encoding the values of its fields in the order
that they are declared. In other words, a record is encoded as just the
concatenation of the encodings of its fields. Field values are encoded per
their schema."""
for field in schema["fields"]:
name = field["name"]
if name not in datum and "default" not in field and "null" not in field["type"]:
raise ValueError(f"no value and no default for {name}")
write_data(
encoder,
datum.get(name, field.get("default")),
field["type"],
named_schemas,
name,
)
WRITERS = {
"null": write_null,
"boolean": write_boolean,
"string": write_utf8,
"int": write_int,
"long": write_long,
"float": write_float,
"double": write_double,
"bytes": write_bytes,
"fixed": write_fixed,
"enum": write_enum,
"array": write_array,
"map": write_map,
"union": write_union,
"error_union": write_union,
"record": write_record,
"error": write_record,
}
def write_data(encoder, datum, schema, named_schemas, fname):
"""Write a datum of data to output stream.
    Parameters
----------
encoder: encoder
Type of encoder (e.g. binary or json)
datum: object
Data to write
schema: dict
        Schema to use
named_schemas: dict
Mapping of fullname to schema definition
"""
record_type = extract_record_type(schema)
logical_type = extract_logical_type(schema)
fn = WRITERS.get(record_type)
if fn:
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, schema)
try:
return fn(encoder, datum, schema, named_schemas, fname)
except TypeError as ex:
if fname:
raise TypeError(f"{ex} on field {fname}")
raise
else:
return write_data(encoder, datum, named_schemas[record_type], named_schemas, "")
def write_header(encoder, metadata, sync_marker):
header = {
"magic": MAGIC,
"meta": {key: value.encode() for key, value in metadata.items()},
"sync": sync_marker,
}
write_data(encoder, header, HEADER_SCHEMA, {}, "")
def null_write_block(encoder, block_bytes, compression_level):
"""Write block in "null" codec."""
encoder.write_long(len(block_bytes))
encoder._fo.write(block_bytes)
def deflate_write_block(encoder, block_bytes, compression_level):
"""Write block in "deflate" codec."""
# The first two characters and last character are zlib
# wrappers around deflate data.
if compression_level is not None:
data = zlib.compress(block_bytes, compression_level)[2:-1]
else:
data = zlib.compress(block_bytes)[2:-1]
encoder.write_long(len(data))
encoder._fo.write(data)
def bzip2_write_block(encoder, block_bytes, compression_level):
"""Write block in "bzip2" codec."""
data = bz2.compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
def xz_write_block(encoder, block_bytes, compression_level):
"""Write block in "xz" codec."""
data = lzma.compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
BLOCK_WRITERS = {
"null": null_write_block,
"deflate": deflate_write_block,
"bzip2": bzip2_write_block,
"xz": xz_write_block,
}
def _missing_codec_lib(codec, library):
def missing(encoder, block_bytes, compression_level):
raise ValueError(
f"{codec} codec is supported but you need to install {library}"
)
return missing
def snappy_write_block(encoder, block_bytes, compression_level):
"""Write block in "snappy" codec."""
data = snappy.compress(block_bytes)
encoder.write_long(len(data) + 4) # for CRC
encoder._fo.write(data)
encoder.write_crc32(block_bytes)
try:
import snappy
except ImportError:
BLOCK_WRITERS["snappy"] = _missing_codec_lib("snappy", "python-snappy")
else:
BLOCK_WRITERS["snappy"] = snappy_write_block
def zstandard_write_block(encoder, block_bytes, compression_level):
"""Write block in "zstandard" codec."""
data = zstd.ZstdCompressor().compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
try:
import zstandard as zstd
except ImportError:
BLOCK_WRITERS["zstandard"] = _missing_codec_lib("zstandard", "zstandard")
else:
BLOCK_WRITERS["zstandard"] = zstandard_write_block
def lz4_write_block(encoder, block_bytes, compression_level):
"""Write block in "lz4" codec."""
data = lz4.block.compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
try:
import lz4.block
except ImportError:
BLOCK_WRITERS["lz4"] = _missing_codec_lib("lz4", "lz4")
else:
BLOCK_WRITERS["lz4"] = lz4_write_block
class GenericWriter:
def __init__(self, schema, metadata=None, validator=None):
self._named_schemas = {}
self.schema = parse_schema(schema, self._named_schemas)
self.validate_fn = _validate if validator is True else validator
self.metadata = metadata or {}
if isinstance(schema, dict):
schema = {
key: value
for key, value in schema.items()
if key not in ("__fastavro_parsed", "__named_schemas")
}
elif isinstance(schema, list):
schemas = []
for s in schema:
if isinstance(s, dict):
schemas.append(
{
key: value
for key, value in s.items()
if key
not in (
"__fastavro_parsed",
"__named_schemas",
)
}
)
else:
schemas.append(s)
schema = schemas
self.metadata["avro.schema"] = json.dumps(schema)
class Writer(GenericWriter):
def __init__(
self,
fo,
schema,
codec="null",
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None,
compression_level=None,
):
GenericWriter.__init__(self, schema, metadata, validator)
self.metadata["avro.codec"] = codec
if isinstance(fo, BinaryEncoder):
self.encoder = fo
else:
self.encoder = BinaryEncoder(fo)
self.io = BinaryEncoder(BytesIO())
self.block_count = 0
self.sync_interval = sync_interval
self.compression_level = compression_level
if _is_appendable(self.encoder._fo):
# Seed to the beginning to read the header
self.encoder._fo.seek(0)
avro_reader = reader(self.encoder._fo)
header = avro_reader._header
file_writer_schema = parse_schema(avro_reader.writer_schema)
if self.schema != file_writer_schema:
raise ValueError(
f"Provided schema {self.schema} does not match "
+ f"file writer_schema {file_writer_schema}"
)
codec = avro_reader.metadata.get("avro.codec", "null")
self.sync_marker = header["sync"]
# Seek to the end of the file
self.encoder._fo.seek(0, 2)
self.block_writer = BLOCK_WRITERS[codec]
else:
self.sync_marker = sync_marker or urandom(SYNC_SIZE)
try:
self.block_writer = BLOCK_WRITERS[codec]
except KeyError:
raise ValueError(f"unrecognized codec: {codec}")
write_header(self.encoder, self.metadata, self.sync_marker)
def dump(self):
self.encoder.write_long(self.block_count)
self.block_writer(self.encoder, self.io._fo.getvalue(), self.compression_level)
self.encoder._fo.write(self.sync_marker)
self.io._fo.truncate(0)
self.io._fo.seek(0, SEEK_SET)
self.block_count = 0
def write(self, record):
if self.validate_fn:
self.validate_fn(record, self.schema, self._named_schemas)
write_data(self.io, record, self.schema, self._named_schemas, "")
self.block_count += 1
if self.io._fo.tell() >= self.sync_interval:
self.dump()
def write_block(self, block):
# Clear existing block if there are any records pending
if self.io._fo.tell() or self.block_count > 0:
self.dump()
self.encoder.write_long(block.num_records)
self.block_writer(self.encoder, block.bytes_.getvalue(), self.compression_level)
self.encoder._fo.write(self.sync_marker)
def flush(self):
if self.io._fo.tell() or self.block_count > 0:
self.dump()
self.encoder._fo.flush()
class JSONWriter(GenericWriter):
def __init__(
self,
fo,
schema,
codec="null",
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None,
codec_compression_level=None,
):
GenericWriter.__init__(self, schema, metadata, validator)
self.encoder = fo
self.encoder.configure(self.schema, self._named_schemas)
def write(self, record):
if self.validate_fn:
self.validate_fn(record, self.schema, self._named_schemas)
write_data(self.encoder, record, self.schema, self._named_schemas, "")
def flush(self):
self.encoder.flush()
def writer(
fo,
schema,
records,
codec="null",
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None,
codec_compression_level=None,
):
"""Write records to fo (stream) according to schema
Parameters
----------
fo: file-like
Output stream
schema: dict
Writer schema
records: iterable
Records to write. This is commonly a list of the dictionary
representation of the records, but it can be any iterable
codec: string, optional
Compression codec, can be 'null', 'deflate' or 'snappy' (if installed)
sync_interval: int, optional
Size of sync interval
metadata: dict, optional
Header metadata
validator: None, True or a function
        Validator function. If None (the default) - no validation. If True then
        fastavro.validation.validate will be used. If it's a function, it
        should have the same signature as fastavro.writer.validate and raise an
        exception on error.
sync_marker: bytes, optional
A byte string used as the avro sync marker. If not provided, a random
byte string will be used.
codec_compression_level: int, optional
Compression level to use with the specified codec (if the codec
supports it)
Example::
from fastavro import writer, parse_schema
schema = {
'doc': 'A weather reading.',
'name': 'Weather',
'namespace': 'test',
'type': 'record',
'fields': [
{'name': 'station', 'type': 'string'},
{'name': 'time', 'type': 'long'},
{'name': 'temp', 'type': 'int'},
],
}
parsed_schema = parse_schema(schema)
records = [
{u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
{u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
{u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
{u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
]
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
The `fo` argument is a file-like object so another common example usage
would use an `io.BytesIO` object like so::
from io import BytesIO
from fastavro import writer
fo = BytesIO()
writer(fo, schema, records)
Given an existing avro file, it's possible to append to it by re-opening
the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't
able to read some of the existing header information and an error will be
raised. For example::
# Write initial records
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
# Write some more records
with open('weather.avro', 'a+b') as out:
writer(out, parsed_schema, more_records)
"""
# Sanity check that records is not a single dictionary (as that is a common
# mistake and the exception that gets raised is not helpful)
if isinstance(records, dict):
raise ValueError('"records" argument should be an iterable, not dict')
if isinstance(fo, AvroJSONEncoder):
writer_class = JSONWriter
else:
# Assume a binary IO if an encoder isn't given
writer_class = Writer
fo = BinaryEncoder(fo)
output = writer_class(
fo,
schema,
codec,
sync_interval,
metadata,
validator,
sync_marker,
codec_compression_level,
)
for record in records:
output.write(record)
output.flush()
def schemaless_writer(fo, schema, record):
"""Write a single record without the schema or header information
Parameters
----------
fo: file-like
Output file
schema: dict
Schema
record: dict
Record to write
Example::
parsed_schema = fastavro.parse_schema(schema)
with open('file', 'rb') as fp:
fastavro.schemaless_writer(fp, parsed_schema, record)
Note: The ``schemaless_writer`` can only write a single record.
"""
named_schemas = {}
schema = parse_schema(schema, named_schemas)
encoder = BinaryEncoder(fo)
write_data(encoder, record, schema, named_schemas, "")
encoder.flush()
| mit | 5,944,205,814,396,213,000 | 31.906977 | 88 | 0.60371 | false |
songmonit/CTTMSONLINE_V8 | openerp/release.py | 1 | 2596 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
RELEASE_LEVELS = [ALPHA, BETA, RELEASE_CANDIDATE, FINAL] = ['alpha', 'beta', 'candidate', 'final']
RELEASE_LEVELS_DISPLAY = {ALPHA: ALPHA,
BETA: BETA,
RELEASE_CANDIDATE: 'rc',
FINAL: ''}
# version_info format: (MAJOR, MINOR, MICRO, RELEASE_LEVEL, SERIAL)
# inspired by Python's own sys.version_info, in order to be
# properly comparable using normal operators, for example:
# (6,1,0,'beta',0) < (6,1,0,'candidate',1) < (6,1,0,'candidate',2)
# (6,1,0,'candidate',2) < (6,1,0,'final',0) < (6,1,2,'final',0)
version_info = (2, 8, 0, BETA, 0)
version = '.'.join(map(str, version_info[:2])) + RELEASE_LEVELS_DISPLAY[version_info[3]] + str(version_info[4] or '')
series = serie = major_version = '.'.join(map(str, version_info[:2]))
product_name = 'CTTMS'
description = 'CTTMS Server'
long_desc = '''CTTMS is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features include
a distributed server, flexible workflows, an object database, a dynamic GUI,
customizable reports, and XML-RPC interfaces.
'''
classifiers = """Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU Affero General Public License v3
Programming Language :: Python
"""
url = 'https://www.cttms.com'
author = 'OpenERP S.A.'
author_email = '[email protected]'
license = 'AGPL-3'
nt_service_name = "CTTMS-server-" + series
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,346,447,488,891,795,000 | 45.357143 | 117 | 0.645223 | false |
orcmkit/ORCmKit | Python27/ORCSim/LiquidReceiver.py | 1 | 4981 | from __future__ import division
from CoolProp.CoolProp import PropsSI
import pylab
from ACHPTools import Write2CSV
from matplotlib.pyplot import plot, show, figure, semilogy, xlim, ylim, title, xlabel, ylabel, legend
from math import pi,exp,log,sqrt,tan,cos,sin
from scipy.optimize import brentq
from scipy.constants import g
import numpy as np
from PHEX_ASME2015 import PHEHXClass
from LineSet import LineSetClass
class LiquidReceiverClass():
"Create Refrigerant buffer tank class"
def __init__(self,**kwargs):
#Load up the parameters passed in
# using the dictionary
self.__dict__.update(kwargs)
self.Condenser=PHEHXClass()
def Update(self,**kwargs):
#Update the parameters passed in
# using the dictionary
self.__dict__.update(kwargs)
def OutputList(self):
"""
Return a list of parameters for this component for further output
It is a list of tuples, and each tuple is formed of items with indices:
[0] Description of value
[1] Units of value
[2] The value itself
"""
return [
('Liquid Receiver Total Volume','m3',self.Volume_tank),
('Liquid Receiver Total Charge','Kg',self.Charge_Tank),
('Inlet Temperature','K',self.Tin),
('Outlet Temperature','K',self.Tout),
('Inlet Pressure','kPa',self.pin),
('Inlet Density', 'kg/m3',self.rho_in),
('Outlet Pressure','kPa',self.pout)
]
def Calculate(self):
"""
        The liquid receiver acts as a damper in the cycle, absorbing the mass flow rate
        fluctuations. More concretely, its behaviour can be explained as follows.
When the liquid receiver gets subcooled or saturated liquid at its top, it can be assumed to be
in thermodynamic equilibrium at each time, because liquid and vapor have the same pressure when
they enter it (indeed, if the reservoir isn't full, the vapor contained in it must be saturated, as it is in
presence of liquid). In the inferior part of the tank, the mix of saturated and subcooled liquid (already
present) allows the working fluid to exit it in a subcooled liquid state. The saturation pressure and
        temperature then reign in the superior part of the reservoir. Thus, with this component, the
        charge fluctuations are literally absorbed, put to an equilibrium value, and the subcooling becomes
null (this fact can't be stated in the presence of non-condensable gases).
level = (h_v_sat - h)/(h_v_sat - h_l_sat)*(rho/rho_l_sat)
"""
# Density [kg/m^3]
self.rho_in=PropsSI('D','T',self.Tin, 'P', self.pin*1000+100, self.Ref)
#Static pressure (rho*g*h) between inlet and outlet of the tank"
self.pout=self.pin #+ (self.rho_in*g*self.h_ports)/1000
# print 'LiquidReceiver.pout', self.pout
self.Tout = self.Tin #no temperature gradient is observed in the reservoir.
self.hin = PropsSI('H','T',self.Tin,'P',self.pin*1000+100,self.Ref) #J/kg
"""
"Calculations"
"x_ex_tank=0" "due to the presence of non condensable gas (air, due to leakage) in the working fluid,
"the liquid at the exit of the tank is not saturated..."
#h_su_tank=h_ex_cd
#V_ex_tank = m_dot/rho_ex_tank "Check V_dot_su_pump at the beginning of the file!!"
"""
self.hout = PropsSI('H','T',self.Tout, 'P', self.pout*1000+100, self.Ref) #J/kg
#print 'LiquidReceiver.hout', self.hout
self.sout = PropsSI('S','T',self.Tout, 'P', self.pout*1000+100, self.Ref) #J/kg
#Calculate saturated values
#Charge of the tank [kg]
"""
        The tank is characterized by an internal diameter and height (ID,h)
and by the maximum level of refrigerant inside
"""
self.Volume_tank = pi*self.ID**2/4.0*self.h_receiver
self.Charge_Tank = self.Volume_tank * self.rho_in
#self.Volume_ref = self.Charge_Tank/self.LiquidReceiver.rho_in
if __name__=='__main__':
pin_list=[527.374817]
Tin_list=[15.48]
zip(pin_list,Tin_list)
for pin,Tin in zip(pin_list,Tin_list):
kwds={
'Ref':'R134A',
'pin':pin,
'Tin':Tin+273.15,
'ID':0.3,
'h_receiver': 1,
'h_ports':0.5
}
LiquidReceiver=LiquidReceiverClass(**kwds)
LiquidReceiver.Calculate()
print 'Charge [kg]',LiquidReceiver.Charge_Tank
print 'pin [kPa]', LiquidReceiver.pin
print 'pout [kPa]',LiquidReceiver.pout
print 'Receiver Volume [cm3]', LiquidReceiver.Volume_tank*1e6 | mit | 3,989,023,316,795,235,300 | 37.323077 | 116 | 0.598474 | false |
jar3k/django-model-options | model_options/mixins.py | 1 | 1737 | from django.contrib.contenttypes.fields import GenericRelation
from django.core.cache import cache
from django.db import models, IntegrityError, transaction
from .utils import detect_type
from .models import Option
class OptionsMixin(models.Model):
options = GenericRelation(Option)
class Meta:
abstract = True
def delete_option(self, key):
self.options.get(key=key).delete()
def get_option(self, key, default=None):
try:
option = self.options.get(key=key)
return detect_type(option.value)
except Option.DoesNotExist:
return default
def has_option(self, key):
return bool(self.options.filter(key=key).exists())
def set_option(self, key, value=True):
try:
with transaction.atomic():
self.options.create(key=key, value=value)
except IntegrityError:
option = self.options.get(key=key)
option.value = value
option.save()
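# Example usage sketch (editor's addition; "Article" is a hypothetical model,
# not something defined by this package):
#
#     class Article(OptionsMixin):
#         title = models.CharField(max_length=100)
#
#     article = Article.objects.create(title='Hello')
#     article.set_option('featured', True)   # creates or updates the Option row
#     article.get_option('featured')         # -> True (parsed via detect_type)
#     article.has_option('featured')         # -> True
#     article.delete_option('featured')      # removes the Option row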
class CachedOptionsMixin(object):
@property
def cache_key_prefix(self):
return "{}-{}".format(self._meta.app_label, self._meta.model_name)
def delete_option(self, key):
cache.delete(self._get_cache_key(key))
def get_option(self, key, default=None):
option = self._get_option(key)
return detect_type(option) if option else default
def has_option(self, key):
return bool(self._get_option(key))
def set_option(self, key, value=True):
cache.set(self._get_cache_key(key), value)
def _get_cache_key(self, key):
return "{}-{}".format(self.cache_key_prefix, key)
def _get_option(self, key):
return cache.get(self._get_cache_key(key))
| mit | -4,955,214,051,976,959,000 | 27.016129 | 74 | 0.633851 | false |
ericholscher/djangoembed | oembed/views.py | 1 | 4618 | import re
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse, get_resolver
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.template import defaultfilters
from django.utils import simplejson
from django.utils.encoding import smart_str
import oembed
from oembed.consumer import OEmbedConsumer
from oembed.exceptions import OEmbedException, OEmbedMissingEndpoint
from oembed.providers import DjangoProvider, HTTPProvider
resolver = get_resolver(None)
def json(request, *args, **kwargs):
"""
The oembed endpoint, or the url to which requests for metadata are passed.
Third parties will want to access this view with URLs for your site's
content and be returned OEmbed metadata.
"""
# coerce to dictionary
params = dict(request.GET.items())
callback = params.pop('callback', None)
url = params.pop('url', None)
if not url:
return HttpResponseBadRequest('Required parameter missing: URL')
try:
provider = oembed.site.provider_for_url(url)
if not provider.provides:
raise OEmbedMissingEndpoint()
except OEmbedMissingEndpoint:
raise Http404('No provider found for %s' % url)
query = dict([(smart_str(k), smart_str(v)) for k, v in params.items() if v])
try:
resource = oembed.site.embed(url, **query)
except OEmbedException, e:
raise Http404('Error embedding %s: %s' % (url, str(e)))
response = HttpResponse(mimetype='application/json')
json = resource.json
if callback:
response.write('%s(%s)' % (defaultfilters.force_escape(callback), json))
else:
response.write(json)
return response
def consume_json(request):
"""
Extract and return oembed content for given urls.
Required GET params:
urls - list of urls to consume
Optional GET params:
width - maxwidth attribute for oembed content
height - maxheight attribute for oembed content
template_dir - template_dir to use when rendering oembed
Returns:
list of dictionaries with oembed metadata and renderings, json encoded
"""
client = OEmbedConsumer()
urls = request.GET.getlist('urls')
width = request.GET.get('width')
height = request.GET.get('height')
template_dir = request.GET.get('template_dir')
output = {}
for url in urls:
try:
provider = oembed.site.provider_for_url(url)
except OEmbedMissingEndpoint:
oembeds = None
rendered = None
else:
oembeds = url
rendered = client.parse_text(url, width, height, template_dir=template_dir)
output[url] = {
'oembeds': oembeds,
'rendered': rendered,
}
return HttpResponse(simplejson.dumps(output), mimetype='application/json')
def oembed_schema(request):
"""
A site profile detailing valid endpoints for a given domain. Allows for
better auto-discovery of embeddable content.
OEmbed-able content lives at a URL that maps to a provider.
"""
current_domain = Site.objects.get_current().domain
url_schemes = [] # a list of dictionaries for all the urls we can match
endpoint = reverse('oembed_json') # the public endpoint for our oembeds
providers = oembed.site.get_providers()
for provider in providers:
# first make sure this provider class is exposed at the public endpoint
if not provider.provides:
continue
match = None
if isinstance(provider, DjangoProvider):
# django providers define their regex_list by using urlreversing
url_pattern = resolver.reverse_dict.get(provider._meta.named_view)
# this regex replacement is set to be non-greedy, which results
# in things like /news/*/*/*/*/ -- this is more explicit
if url_pattern:
regex = re.sub(r'%\(.+?\)s', '*', url_pattern[0][0][0])
match = 'http://%s/%s' % (current_domain, regex)
elif isinstance(provider, HTTPProvider):
match = provider.url_scheme
else:
match = provider.regex
if match:
url_schemes.append({
'type': provider.resource_type,
'matches': match,
'endpoint': endpoint
})
url_schemes.sort(key=lambda item: item['matches'])
response = HttpResponse(mimetype='application/json')
response.write(simplejson.dumps(url_schemes))
return response
| mit | -8,876,648,113,996,773,000 | 31.293706 | 87 | 0.64097 | false |
schreiberx/sweet | benchmarks_sphere/report_konwihr_rexi_nl/compare_wt_dt_vs_accuracy_galewsky_new_rexi/rexi_benchmarks.py | 1 | 8037 | #! /usr/bin/env python3
import os
import sys
import math
from itertools import product
# REXI
from mule_local.rexi.REXICoefficients import *
from mule_local.rexi.pcirexi.BeanREXI import BeanREXI
from mule_local.rexi.pcirexi.LRREXI import LRREXI
from mule_local.rexi.trexi.TREXI import *
from mule_local.rexi.cirexi.CIREXI import *
from mule_local.rexi.elrexi.ELREXI import *
from mule_local.rexi.brexi.BREXI import *
# EFloat
efloat_mode = "float"
def get_rexi_benchmarks(jg):
# Accumulator of all REXI methods
# rexi_method['rexi_method'] = 'file' # Choose REXI method which is typically 'file' for all file-based ones
# rexi_method['rexi_files_coefficients'] = None # List with approximations for different 'phi' functions
rexi_methods = []
#
# CI REXI
#
if True:
# REXI stuff
def fun_params_ci_N(ci_max_real, ci_max_imag):
if ci_max_imag >= 7:
return 128
else:
return 32
params_ci_max_imag = [30.0]
params_ci_max_real = [10.0]
#
# Scale the CI circle radius relative to this time step size
# We do this simply to get a consistent time stepping method
# Otherwise, CI would not behave consistently
# Yes, that's ugly, but simply how it goes :-)
#
params_ci_max_imag_scaling_relative_to_timestep_size = 480
# params_ci_max_imag_scaling_relative_to_timestep_size = None
params_ci_min_imag = 5.0
rexi_method = {}
# Choose REXI method which is typically 'file' for all file-based ones
rexi_method['rexi_method'] = 'file'
# List with approximations for different 'phi' functions
rexi_method['rexi_files_coefficients'] = None
for ci_max_imag, ci_max_real in product(params_ci_max_imag, params_ci_max_real):
if params_ci_max_imag_scaling_relative_to_timestep_size != None:
ci_max_imag *= (jg.runtime.timestep_size / params_ci_max_imag_scaling_relative_to_timestep_size)
# "phi0"
cirexi = CIREXI(efloat_mode=efloat_mode)
coeffs_phi0 = cirexi.setup(
function_name="phi0",
N=fun_params_ci_N(ci_max_real, ci_max_imag),
lambda_include_imag=ci_max_imag,
lambda_max_real=ci_max_real
).toFloat()
# "phi1"
cirexi = CIREXI(efloat_mode=efloat_mode)
coeffs_phi1 = cirexi.setup(
function_name="phi1",
N=fun_params_ci_N(ci_max_real, ci_max_imag),
lambda_include_imag=ci_max_imag,
lambda_max_real=ci_max_real
).toFloat()
# "phi2"
cirexi = CIREXI(efloat_mode=efloat_mode)
coeffs_phi2 = cirexi.setup(
function_name="phi2",
N=fun_params_ci_N(ci_max_real, ci_max_imag),
lambda_include_imag=ci_max_imag, lambda_max_real=ci_max_real
).toFloat()
rexi_method['rexi_files_coefficients'] = [coeffs_phi0, coeffs_phi1, coeffs_phi2]
# Add to list of REXI methods
rexi_methods.append(rexi_method)
#
# EL-REXI
#
if True:
max_imags = [30.0]
rexi_method = {}
# Choose REXI method which is typically 'file' for all file-based ones
rexi_method['rexi_method'] = 'file'
# List with approximations for different 'phi' functions
rexi_method['rexi_files_coefficients'] = None
for max_imag in max_imags:
# "phi0"
elrexi = ELREXI(efloat_mode=efloat_mode)
coeffs_phi0 = elrexi.setup(
function_name="phi0",
N=max(64, int(75 * max_imag / 30)),
lambda_max_real=10.5,
lambda_max_imag=max_imag + 2.5
).toFloat()
# "phi1"
elrexi = ELREXI(efloat_mode=efloat_mode)
coeffs_phi1 = elrexi.setup(
function_name="phi1",
N=max(64, int(75 * max_imag / 30)),
lambda_max_real=10.5,
lambda_max_imag=max_imag + 2.5
).toFloat()
# "phi2"
elrexi = ELREXI(efloat_mode=efloat_mode)
coeffs_phi2 = elrexi.setup(
function_name="phi2",
N=max(64, int(75 * max_imag / 30)),
lambda_max_real=10.5,
lambda_max_imag=max_imag + 2.5
).toFloat()
rexi_method['rexi_files_coefficients'] = [coeffs_phi0, coeffs_phi1, coeffs_phi2]
# Add to list of REXI methods
rexi_methods.append(rexi_method)
#
# LR-REXI (Rectangle contour with Gauss-Legendre Quadrature)
#
if True:
max_imags = [30.0]
rexi_method = {}
# Choose REXI method which is typically 'file' for all file-based ones
rexi_method['rexi_method'] = 'file'
# List with approximations for different 'phi' functions
rexi_method['rexi_files_coefficients'] = None
for max_imag in max_imags:
# "phi0"
lrrexi = LRREXI(efloat_mode=efloat_mode)
coeffs_phi0 = lrrexi.setup(
function_name="phi0",
width=23,
height=2 * max_imag + 20,
center=-1,
N=128).toFloat()
# "phi1"
lrrexi = LRREXI(efloat_mode=efloat_mode)
coeffs_phi1 = lrrexi.setup(
function_name="phi1",
width=23,
height=2 * max_imag + 20,
center=-1,
N=128).toFloat()
# "phi2"
lrrexi = LRREXI(efloat_mode=efloat_mode)
coeffs_phi2 = lrrexi.setup(
function_name="phi2",
width=23,
height=2 * max_imag + 20,
center=-1,
N=128).toFloat()
rexi_method['rexi_files_coefficients'] = [coeffs_phi0, coeffs_phi1, coeffs_phi2]
# Add to list of REXI methods
rexi_methods.append(rexi_method)
#
# Bean-REXI
#
if True:
max_imags = [30.0]
rexi_method = {}
# Choose REXI method which is typically 'file' for all file-based ones
rexi_method['rexi_method'] = 'file'
# List with approximations for different 'phi' functions
rexi_method['rexi_files_coefficients'] = None
for max_imag in max_imags:
# "phi0"
beanrexi = BeanREXI(efloat_mode=efloat_mode)
coeffs_phi0 = beanrexi.setup(
function_name="phi0",
horizontal_radius=16,
vertical_radius=max_imag / 30 * 35,
center=-2,
N=max(64, int(75 * max_imag / 30))).toFloat()
# "phi1"
beanrexi = BeanREXI(efloat_mode=efloat_mode)
coeffs_phi1 = beanrexi.setup(
function_name="phi1",
horizontal_radius=16,
vertical_radius=max_imag / 30 * 35,
center=-2,
N=max(64, int(75 * max_imag / 30))).toFloat()
# "phi2"
beanrexi = BeanREXI(efloat_mode=efloat_mode)
coeffs_phi2 = beanrexi.setup(
function_name="phi2",
horizontal_radius=16,
vertical_radius=max_imag / 30 * 35,
center=-2,
N=max(64, int(75 * max_imag / 30))).toFloat()
rexi_method['rexi_files_coefficients'] = [coeffs_phi0, coeffs_phi1, coeffs_phi2]
# Add to list of REXI methods
rexi_methods.append(rexi_method)
return rexi_methods
if __name__ == "__main__":
pass
| mit | -5,738,522,908,586,182,000 | 32.348548 | 126 | 0.520717 | false |
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/mapnik_animated_map.py | 1 | 7430 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os
from opus_core.logger import logger
from opus_core.store.attribute_cache import AttributeCache
from opus_core.simulation_state import SimulationState
from opus_core.session_configuration import SessionConfiguration
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.mapnik_map import MapnikMap
class MapnikAnimation(MapnikMap):
def get_file_extension(self):
return 'gif'
def visualize(self,
indicators_to_visualize,
computed_indicators):
"""Create a map for the given indicator, save it to the cache
directory's 'indicators' sub-directory."""
#TODO: eliminate this example indicator stuff
example_indicator = computed_indicators[indicators_to_visualize[0]]
source_data = example_indicator.source_data
dataset_to_attribute_map = {}
package_order = source_data.get_package_order()
self._create_input_stores(years = source_data.years)
for name, computed_indicator in computed_indicators.items():
if name not in indicators_to_visualize: continue
if computed_indicator.source_data != source_data:
raise Exception('result templates in indicator batch must all be the same.')
dataset_name = computed_indicator.indicator.dataset_name
if dataset_name not in dataset_to_attribute_map:
dataset_to_attribute_map[dataset_name] = []
dataset_to_attribute_map[dataset_name].append(name)
viz_metadata = []
for dataset_name, indicator_names in dataset_to_attribute_map.items():
attributes = [(name,computed_indicators[name].get_computed_dataset_column_name())
for name in indicator_names]
for year in source_data.years:
SessionConfiguration(
new_instance = True,
package_order = package_order,
in_storage = AttributeCache())
SimulationState().set_cache_directory(source_data.cache_directory)
SimulationState().set_current_time(year)
dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
dataset.load_dataset()
if dataset.get_coordinate_system() is not None:
dataset.compute_variables(names = dataset.get_coordinate_system())
for indicator_name, computed_name in attributes:
indicator = computed_indicators[indicator_name]
table_data = self.input_stores[year].load_table(
table_name = dataset_name,
column_names = [computed_name])
if computed_name in table_data:
table_name = self.get_name(
dataset_name = dataset_name,
years = [year],
attribute_names = [indicator_name])
if self.scale:
min_value, max_value = self.scale
else:
min_value, max_value = (None, None)
file_path = os.path.join(self.storage_location,
'anim_' + table_name + '.' + MapnikMap.get_file_extension(self))
dataset.add_attribute(name = str(computed_name),
data = table_data[computed_name])
dataset.plot_map(
name = str(computed_name),
min_value = min_value,
max_value = max_value,
file = str(file_path),
my_title = str(indicator_name),
color_list = self.color_list,
range_list = self.range_list,
label_list = self.label_list,
is_animation = True,
year = year,
resolution = self.resolution,
page_dims = self.page_dims,
map_lower_left = self.map_lower_left,
map_upper_right = self.map_upper_right,
legend_lower_left = self.legend_lower_left,
legend_upper_right = self.legend_upper_right
#filter = where(table_data[computed_name] != -1)
#filter = 'urbansim.gridcell.is_fully_in_water'
)
#metadata = ([indicator_name], table_name, [year])
#viz_metadata.append(metadata)
else:
logger.log_warning('There is no computed indicator %s'%computed_name)
for indicator_name, computed_name in attributes:
self.create_animation(
dataset_name = dataset_name,
year_list = source_data.years,
indicator_name = str(indicator_name),
viz_metadata = viz_metadata
)
visualization_representations = []
for indicator_names, table_name, years in viz_metadata:
visualization_representations.append(
self._get_visualization_metadata(
computed_indicators = computed_indicators,
indicators_to_visualize = indicator_names,
table_name = table_name,
years = years)
)
return visualization_representations
# precondition: year_list must always have at least one element
# this function is called by the visualize function
def create_animation(self, dataset_name, year_list, indicator_name, viz_metadata):
map_file_list = []
for year in year_list:
map_file_list.append(os.path.join(self.storage_location,'anim_'+dataset_name+'_map_'+str(year)+'_'+indicator_name+'.'+MapnikMap.get_file_extension(self)))
table_name = dataset_name+'_animated_map_'+str(min(year_list))+'_'+indicator_name
animation_file_name = str(os.path.join(self.storage_location,table_name+'.'+self.get_file_extension()))
os.system('convert -delay 100 %s -loop 0 %s' % (' '.join(map_file_list), animation_file_name))
# delete intermediate png files
        for map_file in map_file_list:
            os.remove(map_file)
metadata = ([indicator_name], table_name, [min(year_list)])
viz_metadata.append(metadata)
if __name__ == '__main__':
try:
import mapnik
except:
logger.log_warning('could not import mapnik')
| agpl-3.0 | 8,432,913,646,352,047,000 | 46.025316 | 166 | 0.517766 | false |
won0089/oppia | core/domain/skins_services.py | 1 | 3513 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides services for HTML skins for the reader view."""
__author__ = 'Sean Lip'
import copy
import inspect
from extensions.skins import skin_classes
class Registry(object):
"""Registry of all skins."""
# Dict mapping skin ids to their classes.
_skins_dict = {}
@classmethod
def _refresh_registry(cls):
cls._skins_dict.clear()
# Add new skin classes to the registry.
for name, clazz in inspect.getmembers(skin_classes, inspect.isclass):
if name.endswith('_test') or name == 'BaseSkin':
continue
ancestor_names = [
base_class.__name__ for base_class in inspect.getmro(clazz)]
if 'BaseSkin' not in ancestor_names:
continue
cls._skins_dict[clazz.skin_id] = clazz
@classmethod
def get_skin_by_id(cls, skin_id):
"""Get a skin class instance by id."""
if not cls._skins_dict:
cls._refresh_registry()
return cls._skins_dict[skin_id]
@classmethod
def get_all_skin_ids(cls):
"""Get a list of all skin ids."""
if not cls._skins_dict:
cls._refresh_registry()
return cls._skins_dict.keys()
@classmethod
def get_all_skin_classes(cls):
"""Get a dict mapping skin ids to skin classes."""
if not cls._skins_dict:
cls._refresh_registry()
return copy.deepcopy(cls._skins_dict)
@classmethod
def get_all_specs(cls):
"""Get a dict mapping skin ids to their gadget panels properties."""
if not cls._skins_dict:
cls._refresh_registry()
specs_dict = {}
classes_dict = cls.get_all_skin_classes()
for skin_id in classes_dict:
specs_dict[skin_id] = classes_dict[skin_id].panels_properties
return specs_dict
@classmethod
def get_skin_templates(cls, skin_ids):
"""Returns the concatanated HTML for the given skins.
Raises an error if any of the skins is not found.
"""
cls._refresh_registry()
return '\n'.join([
cls._skins_dict[skin_id].get_html() for skin_id in skin_ids])
@classmethod
def get_skin_js_url(cls, skin_id):
"""Returns the URL to the directive JS code for a given skin.
Refreshes once if the skin id is not found; subsequently, throws an
error.
"""
if skin_id not in cls._skins_dict:
cls._refresh_registry()
return cls._skins_dict[skin_id].get_js_url()
@classmethod
def get_skin_tag(cls, skin_id):
"""Returns an HTML tag corresponding to the given skin.
Refreshes once if the skin id is not found; subsequently, throws an
error.
"""
if skin_id not in cls._skins_dict:
cls._refresh_registry()
return cls._skins_dict[skin_id].get_tag()
| apache-2.0 | -8,853,533,049,173,117,000 | 30.648649 | 77 | 0.619129 | false |
georgebv/coastlib | coastlib/stats/extreme.py | 1 | 165750 | # coastlib, a coastal engineering Python library
# Copyright (C), 2019 Georgii Bocharov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pickle
import corner
import emcee
import matplotlib.pyplot as plt
import matplotlib.ticker
import mpmath
import numpy as np
import pandas as pd
import scipy.stats
import statsmodels.api as sm
import coastlib.math.derivatives
import coastlib.stats.distributions
# Helper function used to handle quantiles of empty arrays
def empty_quantile(array, *args, **kwargs):
if len(array) > 0:
return np.nanquantile(array, *args, **kwargs)
else:
return np.nan
class EVA:
"""
Initializes the EVA class instance by taking a <dataframe> with values in <column> to analyze.
Extracts extreme values. Provides assistance in threshold value selection for the POT method.
Estimates parameters of distributions for given data using Maximum Likelihood Estimate (MLE)
or estimates posterior distributions of parameters of distributions using Markov chain Monte Carlo (MCMC).
For given return periods gives estimates of return values and associated confidence intervals.
Generates various statistical plots such as return value plot and QQ/PP plots.
Provides multiple goodness-of-fit (GOF) statistics and tests.
Parameters
----------
dataframe : pd.DataFrame or pd.Series
Pandas Dataframe or Series object containing data to be analyzed.
Must have index array of type pd.DatetimeIndex.
column : str or int, optional
Name or index of column in <dataframe> with data to be analyzed.
By default is <None> and takes first (0'th index) column from <dataframe>.
block_size : float, optional
Block size in days. Used to determine number of blocks in data (default=365.2425, one Gregorian year).
Block size is used to estimate probabilities (return periods for observed data) for all methods
and to extract extreme events in the 'Block Maxima' method.
By default, it is one Gregorian year and results in return periods having units of years,
i.e. a 100-<block_size> event by default is a 100-year return period event.
Weekly would be <block_size=7> and monthly would be <block_size=365.2425/12>.
gap_length : float, optional
Gap length in hours. Gaps larger than <gap_length> are excluded when calculating total
number of blocks of <block_size> in <dataframe>. Set to None to calculate number of blocks
as "(last_date - first_date) / block_size". Default is 24 hours.
It is also used in Block Maxima extreme value extraction method to get boundaries of blocks.
Public Attributes
-----------------
self.__init__()
self.dataframe : pd.DataFrame
self.column : str
self.block_size : float
self.gap_length : float
self.number_of_blocks : float
self.dataframe_declustered : np.ndarray
self.get_extremes()
self.extremes_method : str
self.extremes_type : str
self.threshold : float
self.block_boundaries : np.ndarray
self.extremes : pd.DataFrame
self.extremes_rate : float
self.plotting_position : str
self.fit()
self.distribution_name : str
self.fit_method : str
self.fit_parameters : tuple
self.scipy_fit_options : dict
self.sampler : emcee.EnsembleSampler
self.mcmc_chain : np.ndarray
self.fixed_parameters : np.ndarray
self.generate_results()
self.results : pd.DataFrame
Private Attributes
------------------
self.__init__()
self.__status : dict
Public Methods
--------------
self.to_pickle
self.read_pickle
self.get_extremes
self.plot_extremes
self.plot_mean_residual_life
self.plot_parameter_stability
self.test_extremes
self.fit
self.plot_trace
self.plot_corner
self.plot_posterior
self.return_value
self.confidence_interval
self.generate_results
self.plot_summary
self.pdf
self.cdf
self.ppf
self.isf
self.plot_qq
self.goodness_of_fit
Private Methods
---------------
self.__init__
self.__get_blocks
self.__update
self.__repr__
self.__get_return_period
self.__run_mcmc
self._kernel_fit_parameters
self.__monte_carlo
self.__delta
self.__get_property
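
    Example Usage
    -------------
    A minimal sketch of a typical workflow (file name, column name, threshold, and
    distribution are illustrative choices; see individual method docstrings for full signatures):

        import pandas as pd
        from coastlib.stats.extreme import EVA

        data = pd.read_csv('waves.csv', index_col=0, parse_dates=True)  # hypothetical dataset
        eva = EVA(dataframe=data, column='Hs')
        eva.get_extremes(method='POT', threshold=3.0, r=24)
        eva.fit(distribution_name='genpareto', fit_method='MLE')

    Return values, confidence intervals, and plots can then be produced with
    self.generate_results() and self.plot_summary().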
"""
def __init__(self, dataframe, column=None, block_size=365.2425, gap_length=24):
"""
Initializes the EVA class instance by taking a <dataframe> with values in <column> to analyze.
Calculates number of blocks with <block_size>, accounting for gaps if <gap_length> is given.
Parameters
----------
dataframe : pd.DataFrame or pd.Series
Pandas Dataframe or Series object containing data to be analyzed.
Must have index array of type pd.DatetimeIndex.
column : str or int, optional
Name or index of column in <dataframe> with data to be analyzed.
By default is <None> and takes first (0'th index) column from <dataframe>.
block_size : float, optional
Block size in days. Used to determine number of blocks in data (default=365.2425, one Gregorian year).
Block size is used to estimate probabilities (return periods for observed data) for all methods
and to extract extreme events in the 'Block Maxima' method.
By default, it is one Gregorian year and results in return periods having units of years,
i.e. a 100-<block_size> event by default is a 100-year return period event.
Weekly would be <block_size=7> and monthly would be <block_size=365.2425/12>.
gap_length : float, optional
Gap length in hours. Gaps larger than <gap_length> are excluded when calculating total
number of blocks of <block_size> in <dataframe>. Set to None to calculate number of blocks
as "(last_date - first_date) / block_size". Default is 24 hours.
It is also used in Block Maxima extreme value extraction method to get boundaries of blocks.
"""
# Ensure passed <dataframe> is a pd.Dataframe object or can be converted to one
if isinstance(dataframe, pd.DataFrame):
self.dataframe = dataframe
elif isinstance(dataframe, pd.Series):
self.dataframe = dataframe.to_frame()
else:
raise TypeError(f'<dataframe> must be {pd.DataFrame} or {pd.Series}, {type(dataframe)} was passed')
# Ensure <dataframe> index is pd.DatetimeIndex object
if not isinstance(dataframe.index, pd.DatetimeIndex):
raise TypeError(f'<dataframe> index must be {pd.DatetimeIndex}, {type(dataframe.index)} was passed')
self.dataframe.sort_index(ascending=True, inplace=True)
# Ensure passed <column> represents a column within <dataframe>
if column is not None:
if isinstance(column, int):
if column < len(self.dataframe.columns):
self.column = self.dataframe.columns[column]
else:
raise ValueError(f'<column> with index {column} is not valid for '
f'dataframe with {len(self.dataframe.columns)} columns')
elif isinstance(column, str):
if column in self.dataframe.columns:
self.column = column
else:
raise ValueError(f'Column {column} is not valid for given dataframe.\n'
f'Valid columns are {self.dataframe.columns}')
else:
raise TypeError(f'Column must be {str} or {int}, {type(column)} was passed.')
else:
self.column = self.dataframe.columns[0]
# Ensure no nans are present in the <dataframe> <column>
nancount = np.sum(np.isnan(self.dataframe[self.column].values))
if nancount > 0:
raise ValueError(f'<dataframe> contains {nancount} NaN values in column {self.column}.'
f'\nNaN values must be removed or filled before performing analysis.')
# Ensure values in <dataframe> <column> are real numbers
if not np.all(np.isreal(self.dataframe[self.column].values)):
raise ValueError(f'Values in <dataframe> <column> must be real numbers,'
f' {self.dataframe[self.column].values.dtype} was passed')
# Calculate number of blocks of <block_size> in <dataframe>
self.block_size = block_size
self.gap_length = gap_length
self.number_of_blocks = self.__get_blocks(gap_length=self.gap_length)
# Separate data into clusters using gap_length and plot each cluster independently
# This way distant clusters are not connected on the plot
if self.gap_length is not None:
cluster_values = [[self.dataframe[self.column].values.copy()[0]]]
cluster_indexes = [[self.dataframe.index.values.copy()[0]]]
for index, value in zip(self.dataframe.index, self.dataframe[self.column].values):
# New cluster encountered
if index - cluster_indexes[-1][-1] > np.timedelta64(pd.Timedelta(hours=self.gap_length)):
cluster_values.append([value])
cluster_indexes.append([index])
# Continuing within current cluster
else:
cluster_values[-1].append(value)
cluster_indexes[-1].append(index)
cluster_indexes = np.array(cluster_indexes)
cluster_values = np.array(cluster_values)
self.dataframe_declustered = np.array([cluster_indexes, cluster_values])
else:
self.dataframe_declustered = None
# Initialize internal status
# Internal status is used to delete calculation results when earlier methods are called
# e.g. removes fit data and results when extreme events are exctracted. This prevents conflicts and errors
self.__status = dict(
extremes=False,
fit=False,
results=False
)
# Extremes extraction
self.extremes_method = None
self.extremes_type = None
self.threshold = None
self.block_boundaries = None
self.extremes = None
self.extremes_rate = None
self.plotting_position = None
# Extremes fit
self.distribution_name = None
self.fit_method = None
self.fit_parameters = None
self.scipy_fit_options = None
self.sampler = None
self.mcmc_chain = None
self.fixed_parameters = None
# Results
self.results = None
def __get_blocks(self, gap_length):
"""
Calculates number of blocks of size <self.block_size> in <self.dataframe> <self.column>.
Parameters
----------
gap_length : float, optional
Gap length in hours. Gaps larger than <gap_length> are excluded when calculating total
number of blocks of <block_size> in <dataframe>. Set to None to calculate number of blocks
as "(last_date - first_date) / block_size". Default is 24 hours.
It is also used in Block Maxima extreme value extraction method to get boundaries of blocks.
Returns
-------
n : float
Number of blocks.
"""
# Calculate number of blocks with gaps accounted for
if gap_length is not None:
timedelta = np.timedelta64(pd.Timedelta(hours=gap_length))
# Eliminate gaps in data by shifting all values upstream of the gap downstream by <total_shift>
new_index = self.dataframe.index.values.copy()
for i in np.arange(1, len(new_index)):
shift = new_index[i] - new_index[i-1]
if shift > timedelta:
# Add 1/10 of gap_length to avoid duplicate dates
new_index[i:] -= shift - np.timedelta64(pd.Timedelta(hours=gap_length/10))
series_range = np.float64(new_index[-1] - new_index[0])
# Calculate number of blocks with gaps not accounted for
else:
series_range = np.float64((self.dataframe.index[-1] - self.dataframe.index[0]).value)
return series_range / 1e9 / 60 / 60 / 24 / self.block_size
def __update(self):
"""
Updates internal state of the EVA class instance object.
This method is used to delete calculation results when earlier methods are called.
For example, removes all data related to fit and results when extreme events are extracted.
"""
if not self.__status['extremes']:
self.extremes_method = None
self.extremes_type = None
self.threshold = None
self.block_boundaries = None
self.extremes = None
self.extremes_rate = None
self.plotting_position = None
if not self.__status['fit']:
self.distribution_name = None
self.fit_method = None
self.fit_parameters = None
self.scipy_fit_options = None
self.sampler = None
self.mcmc_chain = None
self.fixed_parameters = None
if not self.__status['results']:
self.results = None
def __repr__(self):
"""
Generates a string with a summary of the EVA class instance object state.
"""
series_range = (self.dataframe.index[-1] - self.dataframe.index[0]).value / 1e9 / 60 / 60 / 24
summary = str(
f'{" "*35}Extreme Value Analysis Summary\n'
f'{"="*100}\n'
f'Analyzed parameter{self.column:>29}{" "*6}Series length{series_range:29.2f} days\n'
f'Gap length{self.gap_length:31.2f} hours{" "*6}'
f'Adjusted series length{self.number_of_blocks*self.block_size:20.2f} days\n'
f'Block size{self.block_size:32.2f} days{" "*6}Number of blocks{self.number_of_blocks:31.2f}\n'
f'{"="*100}\n'
)
if self.__status['extremes']:
summary += str(
f'Number of extreme events{len(self.extremes):23}{" "*6}Extraction method{self.extremes_method:>30}\n'
f'Extreme event rate{self.extremes_rate:16.2f} events/block{" "*6}'
f'Plotting position{self.plotting_position:>30}\n'
f'Threshold{self.threshold:38.2f}{" "*6}Extreme values type{self.extremes_type:>28}\n'
f'{"="*100}\n'
)
else:
summary += str(
f'Number of extreme events{"N/A":>23}{" " * 6}Extraction method{"N/A":>30}\n'
f'Extreme event rate{"N/A":>16} events/block{" " * 6}'
f'Plotting position{"N/A":>30}\n'
f'Threshold{"N/A":>38}{" "*6}Extreme values type{"N/A":>28}\n'
f'{"=" * 100}\n'
)
if self.__status['fit']:
if self.fit_method == 'MCMC':
fit_parameters = self._kernel_fit_parameters(
burn_in=int(self.mcmc_chain.shape[1] / 2),
kernel_steps=100
)
summary += str(
f'Distribution{self.distribution_name:>35}{" " * 6}Fit method{"Markov chain Monte Carlo":>37}\n'
f'MCMC fit parameters (approximate){str(np.round(fit_parameters, 3)):>14}\n'
f'{"=" * 100}'
)
elif self.fit_method == 'MLE':
summary += str(
f'Distribution{self.distribution_name:>35}{" " * 6}Fit method{"Maximum Likelihood Estimate":>37}\n'
f'MLE fit parameters{str(np.round(self.fit_parameters, 3)):>29}\n'
f'{"=" * 100}'
)
else:
summary += str(
f'Distribution{"N/A":>35}{" " * 6}Fit method{"N/A":>37}\n'
f'Fit parameters{"N/A":>33}\n'
f'{"=" * 100}'
)
return summary
def to_pickle(self, path):
"""
Exports EVA object to a .pyc file. Preserves all data and internal states.
Can be used to save work, share analysis results, and to review work of others.
Parameters
----------
path : str
Path to pickle file: e.g. <path:\to\pickle.pyc>.
"""
with open(path, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def read_pickle(path):
"""
Reads a .pyc file with EVA object. Loads all data and internal states.
Can be used to save work, share analysis results, and to review work of others.
Parameters
----------
path : str
Path to pickle file: e.g. <path:\to\pickle.pyc>.
Returns
-------
file : EVA class instance object
Saved EVA object with all data and internal state preserved.
"""
with open(path, 'rb') as f:
file = pickle.load(f)
return file
def get_extremes(self, method='BM', plotting_position='Weibull', extremes_type='high', **kwargs):
"""
Extracts extreme values from <self.dataframe> <self.column> using the BM (Block Maxima)
or the POT (Peaks Over Threshold) methods. If method is POT, also declusters extreme values using
the runs method (aka minimum distance between independent events).
Parameters
----------
method : str, optional
Peak extraction method. 'POT' for Peaks Over Threshold and 'BM' for Block Maxima (default='BM').
plotting_position : str, optional
Plotting position (default='Weibull'). Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
extremes_type : str, optional
            Specifies type of extremes extracted: 'high' yields max values, 'low' yields min values (default='high').
Use 'high' for extreme high values, use 'low' for extreme low values.
kwargs
for method='POT'
threshold : float
Threshold for extreme value extraction.
Only values above (below, if <extremes_type='low'>) this threshold are extracted.
r : float, optional
Minimum distance in hours between events for them to be considered independent.
Used to decluster extreme values using the runs method (default=24).
adjust_threshold : bool, optional
If True, sets threshold equal to smallest/largest exceedance.
This way Generalized Pareto Distribution location parameter is strictly 0.
Eliminates instabilities associated with estimating location (default=True).
Returns
-------
Creates a <self.extremes> dataframe with extreme values and return periods determined using
        the given plotting position as p=(rank-alpha)/(N+1-alpha-beta) and T=1/((1-p)*extremes_rate).
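
        Examples
        --------
        Illustrative calls (threshold and r values are arbitrary here):

            self.get_extremes(method='BM', plotting_position='Gringorten')
            self.get_extremes(method='POT', threshold=2.5, r=48, extremes_type='high')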
"""
# Update internal status
self.__status = dict(
extremes=False,
fit=False,
results=False
)
self.__update()
if extremes_type not in ['high', 'low']:
raise ValueError(f'<extremes_type> must be high or low, {extremes_type} was passed')
self.extremes_type = extremes_type
# Block Maxima method
if method == 'BM':
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Set threshold to 0 for compatibility between BM and POT formulas
self.extremes_method = 'Block Maxima'
self.threshold = 0
# Generate new index with gaps eliminated
if self.gap_length is not None:
gap_delta = np.timedelta64(pd.Timedelta(hours=self.gap_length))
# Eliminate gaps in data by shifting all values upstream of the gap downstream by <total_shift>
new_index = self.dataframe.index.values.copy()
for i in np.arange(1, len(new_index)):
shift = new_index[i] - new_index[i-1]
if shift > gap_delta:
# Add 1/10 of gap_length to avoid duplicate dates
new_index[i:] -= shift - np.timedelta64(pd.Timedelta(hours=self.gap_length/10))
else:
new_index = self.dataframe.index.values.copy()
# Create local reindexed dataframe with <new_index> and <id> column to get original datetime later
local_dataframe = pd.DataFrame(
data=self.dataframe[self.column].values.copy(),
columns=[self.column], index=new_index
)
local_dataframe['id'] = np.arange(len(local_dataframe))
# Find boundaries of blocks of <self.block_size>
block_delta = np.timedelta64(pd.Timedelta(days=self.block_size))
block_boundaries = [(new_index[0], new_index[0] + block_delta)]
self.block_boundaries = [self.dataframe.index.values.copy()[0]]
while block_boundaries[-1][-1] < local_dataframe.index.values[-1]:
block_boundaries.append(
(block_boundaries[-1][-1], block_boundaries[-1][-1] + block_delta)
)
self.block_boundaries.append(
self.dataframe.index.values.copy()[
local_dataframe.truncate(before=block_boundaries[-1][0])['id'].values[0]
]
)
self.block_boundaries.append(self.block_boundaries[-1] + block_delta)
self.block_boundaries = np.array(self.block_boundaries)
block_boundaries = np.array(block_boundaries)
# Update number_of_blocks
self.number_of_blocks = len(self.block_boundaries) - 1
# Find extreme values within each block and associated datetime indexes from original dataframe
extreme_values, extreme_indexes = [], []
for i, block_boundary in enumerate(block_boundaries):
if i == len(block_boundaries) - 1:
local_data = local_dataframe[local_dataframe.index >= block_boundary[0]]
else:
local_data = local_dataframe[
(local_dataframe.index >= block_boundary[0]) & (local_dataframe.index < block_boundary[1])
]
if len(local_data) != 0:
if self.extremes_type == 'high':
extreme_values.append(local_data[self.column].values.copy().max())
else:
extreme_values.append(local_data[self.column].values.copy().min())
local_index = self.dataframe.index.values.copy()[
local_data[local_data[self.column].values == extreme_values[-1]]['id']
]
if np.isscalar(local_index):
extreme_indexes.append(local_index)
else:
extreme_indexes.append(local_index[0])
self.extremes = pd.DataFrame(data=extreme_values, columns=[self.column], index=extreme_indexes)
# Peaks Over Threshold method
elif method == 'POT':
self.threshold = kwargs.pop('threshold')
r = kwargs.pop('r', 24)
adjust_threshold = kwargs.pop('adjust_threshold', True)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
self.extremes_method = 'Peaks Over Threshold'
# Make sure correct number of blocks is used (overrides previously created BM values)
if isinstance(self.number_of_blocks, int):
self.number_of_blocks = self.__get_blocks(gap_length=self.gap_length)
# Extract raw extremes
if self.extremes_type == 'high':
self.extremes = self.dataframe[self.dataframe[self.column] > self.threshold][self.column].to_frame()
else:
self.extremes = self.dataframe[self.dataframe[self.column] < self.threshold][self.column].to_frame()
# Decluster raw extremes using runs method
if r is not None:
r = np.timedelta64(pd.Timedelta(hours=r))
last_cluster_index = self.extremes.index.values.copy()[0]
peak_cluster_values = [self.extremes[self.column].values.copy()[0]]
peak_cluster_indexes = [self.extremes.index.values.copy()[0]]
for index, value in zip(self.extremes.index, self.extremes[self.column].values):
# New cluster encountered
if index - last_cluster_index > r:
peak_cluster_values.append(value)
peak_cluster_indexes.append(index)
# Continuing within current cluster
else:
# Update cluster peak
if self.extremes_type == 'high':
if value > peak_cluster_values[-1]:
peak_cluster_values[-1] = value
peak_cluster_indexes[-1] = index
else:
if value < peak_cluster_values[-1]:
peak_cluster_values[-1] = value
peak_cluster_indexes[-1] = index
# Index of previous cluster - lags behind <index> by 1
last_cluster_index = index
self.extremes = pd.DataFrame(
data=peak_cluster_values, index=peak_cluster_indexes, columns=[self.column]
)
# Update threshold to smallest/largest extreme value in order to fix the GPD location parameter at 0.
# GPD is very unstable with non-zero location.
if adjust_threshold:
if self.extremes_type == 'high':
self.threshold = self.extremes[self.column].values.min()
else:
self.threshold = self.extremes[self.column].values.max()
else:
raise ValueError(f'Method {method} not recognized')
self.extremes.index.name = self.dataframe.index.name
# Calculate rate of extreme events (events/block)
self.extremes_rate = len(self.extremes) / self.number_of_blocks
# Assign ranks to data with duplicate values having average of ranks they would have individually
self.plotting_position = plotting_position
self.extremes['Return Period'] = self.__get_return_period(plotting_position=self.plotting_position)
# Update internal status
self.__status = dict(
extremes=True,
fit=False,
results=False
)
self.__update()
def __get_return_period(self, plotting_position, return_cdf=False):
"""
Assigns return periods to extracted extreme events and updates the <self.extremes> index.
Parameters
----------
plotting_position : str
Plotting position. Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
return_cdf : bool, optional
If True, returns cdf of extracted extremes (default=False).
"""
# Assign ranks to data with duplicate values having average of ranks they would have individually
if self.extremes_type == 'high':
ranks = scipy.stats.rankdata(self.extremes[self.column].values, method='average')
else:
ranks = len(self.extremes) + 1 - scipy.stats.rankdata(self.extremes[self.column].values, method='average')
# Calculate return periods using a specified plotting position
# https://matplotlib.org/mpl-probscale/tutorial/closer_look_at_plot_pos.html
plotting_positions = {
'ECDF': (0, 1),
'Hazen': (0.5, 0.5),
'Weibull': (0, 0),
'Laplace': (-1, -1),
'Tukey': (1 / 3, 1 / 3),
'Blom': (3 / 8, 3 / 8),
'Median': (0.3175, 0.3175),
'Cunnane': (0.4, 0.4),
'Gringorten': (0.44, 0.44),
'Gumbel': (1, 1)
}
if plotting_position not in plotting_positions:
raise ValueError(f'Plotting position {plotting_position} not recognized')
alpha, beta = plotting_positions[plotting_position][0], plotting_positions[plotting_position][1]
cdf = (ranks - alpha) / (len(self.extremes) + 1 - alpha - beta)
if return_cdf:
return cdf
# Survival function - aka upper tail probability or probability of exceedance
sf = 1 - cdf
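        # Return period in units of <block_size>: T = 1 / (sf * extremes_rate).
        # For example, the Weibull plotting position (alpha=beta=0) gives p = rank / (N + 1),
        # so the largest of N observed extremes gets T = (N + 1) / extremes_rate blocks.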
return 1 / sf / self.extremes_rate
def plot_extremes(self):
"""
Plots extracted extreme values on top of <self.dataframe> <self.column> observed time series.
Shows boundaries of blocks for the Block Maxima method and threshold level for the Peaks Over Threshold method.
Returns
-------
tuple(fig, ax)
"""
# Make sure extreme values have been extracted
if not self.__status['extremes']:
raise RuntimeError('Extreme values have not been extracted. Run self.get_extremes() first')
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
self.extremes.index, self.extremes[self.column],
edgecolors='white', marker='s', facecolors='k', s=40, lw=1, zorder=15
)
if self.gap_length is None:
ax.plot(
self.dataframe.index, self.dataframe[self.column],
color='#3182bd', lw=.5, alpha=.8, zorder=5
)
else:
for x, y in zip(self.dataframe_declustered[0], self.dataframe_declustered[1]):
ax.plot(x, y, color='#3182bd', lw=.5, alpha=.8, zorder=5)
if self.extremes_method == 'Block Maxima':
for _block in self.block_boundaries:
ax.axvline(_block, color='k', ls='--', lw=1, zorder=10)
elif self.extremes_method == 'Peaks Over Threshold':
ax.axhline(self.threshold, color='k', ls='--', lw=1, zorder=10)
ax.set_title(f'Extreme Values Time Series, {self.extremes_method}')
            if self.dataframe.index.name:
ax.set_xlabel(f'{self.dataframe.index.name}')
else:
ax.set_xlabel('Date')
ax.set_ylabel(f'{self.column}')
annot = ax.annotate(
'', xy=(self.extremes.index[0], self.extremes[self.column].values[0]),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
self.extremes.index[0], self.extremes[self.column].values[0],
edgecolors='white', marker='s', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return period : {self.extremes["Return Period"].values[n]:.2f}\n'
f'Plotting position : {self.plotting_position}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
def plot_mean_residual_life(self, thresholds=None, r=24, alpha=.95, extremes_type='high',
adjust_threshold=True, limit=10, plot=True):
"""
Plots means of residuals against thresholds.
Threshold should be chosen as the smallest threshold in a region where the mean residuals' plot
is approximately linear. Generalized Pareto Distribution is asymptotically valid in this region.
Parameters
----------
thresholds : array_like, optional
Array with threshold values for which the plot is generated.
Default .95 quantile to max for 'high' and min to .05 quantile for 'low', 100 values.
r : float, optional
POT method only: minimum distance in hours between events for them to be considered independent.
Used to decluster extreme values using the runs method (default=24).
alpha : float, optional
Confidence interval (default=.95). If None, doesn't plot or return confidence limits.
extremes_type : str, optional
            Specifies type of extremes extracted: 'high' yields max values, 'low' yields min values (default='high').
Use 'high' for extreme high values, use 'low' for extreme low values.
adjust_threshold : bool, optional
If True, sets threshold equal to smallest/largest exceedance.
This way Generalized Pareto Distribution location parameter is strictly 0.
Eliminates instabilities associated with estimating location (default=True).
limit : int, optional
Minimum number of exceedances (peaks) for which calculations are performed (default=10).
plot : bool, optional
Generates plot if True, returns data if False (default=True).
Returns
-------
if plot=True (default) : tuple(fig, ax)
if plot=False : tuple(thresholds, residuals, confidence_low, confidence_top)
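
        Examples
        --------
        An illustrative call (the threshold range is arbitrary):

            fig, ax = self.plot_mean_residual_life(thresholds=np.linspace(2, 8, 100), r=24)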
"""
if thresholds is None:
if extremes_type == 'high':
thresholds = np.linspace(
np.quantile(self.dataframe[self.column].values, .95),
self.dataframe[self.column].values.max(),
100
)
else:
thresholds = np.linspace(
self.dataframe[self.column].values.min(),
np.quantile(self.dataframe[self.column].values, .05),
100
)
if np.isscalar(thresholds):
raise ValueError('Thresholds must be an array. A scalar was provided')
thresholds = np.sort(thresholds)
if extremes_type == 'high':
thresholds = thresholds[thresholds < self.dataframe[self.column].values.max()]
else:
thresholds = thresholds[thresholds > self.dataframe[self.column].values.min()]
# Find mean residuals and 95% confidence interval for each threshold
residuals, confidence = [], []
true_thresholds = []
for u in thresholds:
self.get_extremes(
method='POT', threshold=u, r=r,
adjust_threshold=adjust_threshold, extremes_type=extremes_type
)
true_thresholds.append(self.threshold)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if extremes_type == 'low':
exceedances *= -1
if len(exceedances) > limit:
residuals.append(exceedances.mean())
                # Unbiased estimator of sample variance of mean s^2/n
confidence.append(
scipy.stats.norm.interval(
alpha=alpha, loc=exceedances.mean(),
scale=exceedances.std(ddof=1)/np.sqrt(len(exceedances))
)
)
else:
residuals.append(np.nan)
confidence.append((np.nan, np.nan))
residuals = np.array(residuals)
confidence = np.array(confidence)
# Remove non-unique values
if adjust_threshold:
thresholds, mask = np.unique(true_thresholds, return_index=True)
residuals = residuals[mask]
confidence = confidence[mask]
# Update internal status
self.__status = dict(
extremes=False,
fit=False,
results=False
)
self.__update()
# Generate mean residual life plot
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
ax.set_title('Mean Residual Life Plot')
ax.plot(thresholds, residuals, color='k', zorder=10, label='Mean residual life', lw=2)
ax.plot(thresholds, confidence.T[0], ls='--', color='k', lw=0.5, zorder=10)
ax.plot(thresholds, confidence.T[1], ls='--', color='k', lw=0.5, zorder=10)
ax.fill_between(
thresholds, confidence.T[0], confidence.T[1],
alpha=.1, color='k', label=f'{alpha*100:.0f}% confidence interval', zorder=5
)
ax.legend()
ax.set_xlabel('Threshold')
ax.set_ylabel('Mean Residual')
fig.tight_layout()
return fig, ax
else:
return thresholds, residuals, confidence.T[0], confidence.T[1]
def plot_parameter_stability(self, thresholds=None, r=24, alpha=.95, extremes_type='high',
adjust_threshold=True, limit=10, plot=True, dx='1e-10', precision=100):
"""
        Plots shape and modified scale parameters of the Generalized Pareto Distribution (GPD) against thresholds.
GPD is asymptotically valid in a region where these parameters are approximately linear.
Parameters
----------
thresholds : array_like, optional
Array with threshold values for which the plot is generated.
Default .95 quantile to max for 'high' and min to .05 quantile for 'low', 100 values.
r : float, optional
Minimum distance in hours between events for them to be considered independent.
Used to decluster extreme values using the runs method (default=24).
alpha : float, optional
Confidence interval (default=.95). If None, doesn't plot or return confidence limits.
extremes_type : str, optional
            Specifies type of extremes extracted: 'high' yields max values, 'low' yields min values (default='high').
Use 'high' for extreme high values, use 'low' for extreme low values.
adjust_threshold : bool, optional
If True, sets threshold equal to smallest/largest exceedance.
This way Generalized Pareto Distribution location parameter is strictly 0.
Eliminates instabilities associated with estimating location (default=True).
limit : int, optional
Minimum number of exceedances (peaks) for which calculations are performed (default=10).
plot : bool, optional
Generates plot if True, returns data if False (default=True).
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10').
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
if plot=True (default) : tuple(fig, ax)
if plot=False :
if alpha is None : tuple(thresholds, shapes, modified_scales)
if alpha is passed : tuple(thresholds, shapes, modified_scales, shapes_confidence, scales_confidence)
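
        Examples
        --------
        An illustrative call (the threshold range is arbitrary):

            fig, axes = self.plot_parameter_stability(thresholds=np.linspace(2, 8, 50), alpha=.95)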
"""
if thresholds is None:
if extremes_type == 'high':
thresholds = np.linspace(
np.quantile(self.dataframe[self.column].values, .95),
self.dataframe[self.column].values.max(),
100
)
else:
thresholds = np.linspace(
self.dataframe[self.column].values.min(),
np.quantile(self.dataframe[self.column].values, .05),
100
)
if np.isscalar(thresholds):
raise ValueError('Thresholds must be an array. A scalar was provided')
thresholds = np.sort(thresholds)
if extremes_type == 'high':
thresholds = thresholds[thresholds < self.dataframe[self.column].values.max()]
else:
thresholds = thresholds[thresholds > self.dataframe[self.column].values.min()]
shapes, modified_scales = [], []
shapes_confidence, scales_confidence = [], []
true_thresholds = []
for u in thresholds:
self.get_extremes(
method='POT', threshold=u, r=r,
adjust_threshold=adjust_threshold, extremes_type=extremes_type
)
true_thresholds.append(self.threshold)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if extremes_type == 'low':
exceedances *= -1
if len(exceedances) > limit:
shape, loc, scale = scipy.stats.genpareto.fit(exceedances, floc=0)
shapes.append(shape)
# Define modified scale function (used as scalar function for delta method)
if extremes_type == 'high':
def mod_scale_function(*theta):
return theta[1] - theta[0] * true_thresholds[-1]
else:
def mod_scale_function(*theta):
return theta[1] + theta[0] * true_thresholds[-1]
modified_scales.append(mod_scale_function(shape, scale))
if alpha is not None:
with mpmath.workdps(precision):
# Define modified log_likehood function
def log_likelihood(*theta):
return mpmath.fsum(
[
mpmath.log(
coastlib.stats.distributions.genpareto.pdf(
x=_x, shape=theta[0], loc=0, scale=theta[1]
)
) for _x in exceedances
]
)
# Calculate delta (gradient) of scalar_function
if extremes_type == 'high':
delta_scalar = np.array(
[
[-true_thresholds[-1]],
[1]
]
)
else:
delta_scalar = np.array(
[
[true_thresholds[-1]],
[1]
]
)
# Calculate observed information matrix (negative hessian of log_likelihood)
observed_information = -coastlib.math.derivatives.hessian(
func=log_likelihood, n=2, coordinates=[shape, scale], dx=dx, precision=precision
).astype(np.float64)
covariance = np.linalg.inv(observed_information)
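                        # Delta method: the variance of a scalar function g(theta) of the MLE is
                        # approximated by grad(g)^T * covariance * grad(g), evaluated at the fitted parameters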
# Estimate modified scale parameter confidence interval using delta method
variance = np.dot(
np.dot(delta_scalar.T, covariance), delta_scalar
).flatten()[0]
scales_confidence.append(
scipy.stats.norm.interval(
alpha=alpha, loc=modified_scales[-1], scale=np.sqrt(variance)
)
)
# Estimate shape parameter confidence interval directly from covariance matrix
shapes_confidence.append(
scipy.stats.norm.interval(
alpha=alpha, loc=shape, scale=np.sqrt(covariance[0][0])
)
)
# Number of exceedances below the limit
else:
shapes.append(np.nan)
modified_scales.append(np.nan)
if alpha is not None:
shapes_confidence.append((np.nan, np.nan))
scales_confidence.append((np.nan, np.nan))
# Convert results to np.ndarray objects
shapes = np.array(shapes)
modified_scales = np.array(modified_scales)
if alpha is not None:
shapes_confidence = np.array(shapes_confidence)
scales_confidence = np.array(scales_confidence)
# Remove non-unique values
if adjust_threshold:
thresholds, mask = np.unique(true_thresholds, return_index=True)
shapes = shapes[mask]
modified_scales = modified_scales[mask]
if alpha is not None:
shapes_confidence = shapes_confidence[mask]
scales_confidence = scales_confidence[mask]
# Update internal status
self.__status = dict(
extremes=False,
fit=False,
results=False
)
self.__update()
if plot:
with plt.style.context('bmh'):
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8), sharex='all')
ax1.set_title('Parameter Stability Plot')
ax1.plot(thresholds, shapes, color='k', zorder=10, label='Shape parameter')
ax2.plot(thresholds, modified_scales, color='k', zorder=10, label='Modified scale parameter', lw=2)
if alpha is not None:
ax1.plot(thresholds, shapes_confidence.T[0], ls='--', color='k', lw=0.5)
ax1.plot(thresholds, shapes_confidence.T[1], ls='--', color='k', lw=0.5)
ax2.plot(thresholds, scales_confidence.T[0], ls='--', color='k', lw=0.5)
ax2.plot(thresholds, scales_confidence.T[1], ls='--', color='k', lw=0.5)
ax1.fill_between(
thresholds, shapes_confidence.T[0], shapes_confidence.T[1],
alpha=.1, color='k', label=f'{alpha*100:.0f}% confidence interval'
)
ax2.fill_between(
thresholds, scales_confidence.T[0], scales_confidence.T[1],
alpha=.1, color='k', label=f'{alpha*100:.0f}% confidence interval'
)
ax2.set_xlabel('Threshold')
ax1.set_ylabel('Shape parameter')
ax2.set_ylabel('Modified scale parameter')
ax1.legend()
ax2.legend()
fig.tight_layout()
return fig, (ax1, ax2)
else:
if alpha is None:
return thresholds, shapes, modified_scales
else:
return thresholds, shapes, modified_scales, shapes_confidence, scales_confidence
def test_extremes(self, method, **kwargs):
"""
        Provides multiple methods to test independence of extracted extreme values.
Parameters
----------
method : str
Method for testing extreme values' independence.
Accepted methods:
'autocorrelation' - generates an autocorrelation plot
http://www.statsmodels.org/stable/generated/
statsmodels.tsa.stattools.acf.html#statsmodels.tsa.stattools.acf
'lag plot' - generates a lag plot for a given lag
'runs test' - return runs test statistic
https://en.wikipedia.org/wiki/Wald%E2%80%93Wolfowitz_runs_test
kwargs
for autocorrelation:
plot : bool, optional
Generates plot if True, returns data if False (default=True).
nlags : int, optional
Number of lags to return autocorrelation for (default for all possible lags).
alpha : float, optional
Confidence interval (default=.95). If None, doesn't plot or return confidence limits.
unbiased : bool, optional
If True, then denominators for autocovariance are n-k, otherwise n (default=False)
for lag plot:
plot : bool, optional
Generates plot if True, returns data if False (default=True).
lag : int, optional
Lag value (default=1).
for runs test:
alpha : float, optional
Significance level (default=0.05).
Returns
-------
for autocorrelation:
if plot=True : tuple(fig, ax)
if plot=False : tuple(lags, acorr, ci_low, ci_top)
for lag plot:
if plot=True : tuple(fig, ax)
if plot=False : tuple(x, y)
for runs test:
str(test summary)
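
        Examples
        --------
        Illustrative calls:

            fig, ax = self.test_extremes(method='autocorrelation', nlags=20)
            fig, ax = self.test_extremes(method='lag plot', lag=1)
            print(self.test_extremes(method='runs test', alpha=.05))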
"""
if not self.__status['extremes']:
raise RuntimeError('Extreme values have not been extracted. Nothing to test')
if method == 'autocorrelation':
plot = kwargs.pop('plot', True)
nlags = kwargs.pop('nlags', len(self.extremes) - 1)
alpha = kwargs.pop('alpha', .95)
unbiased = kwargs.pop('unbiased', False)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
acorr, ci = sm.tsa.stattools.acf(
x=self.extremes[self.column].values, alpha=1-alpha, nlags=nlags, unbiased=unbiased
)
ci_low, ci_top = ci.T[0] - acorr, ci.T[1] - acorr
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
ax.vlines(np.arange(nlags+1), [0], acorr, lw=1, color='k', zorder=15)
points = ax.scatter(
np.arange(nlags+1), acorr, marker='o', s=40, lw=1,
facecolor='k', edgecolors='white', zorder=20, label='Autocorrelation value'
)
ax.plot(np.arange(nlags+1)[1:], ci_low[1:], color='k', lw=.5, ls='--', zorder=15)
ax.plot(np.arange(nlags+1)[1:], ci_top[1:], color='k', lw=.5, ls='--', zorder=15)
ax.fill_between(
np.arange(nlags+1)[1:], ci_low[1:], ci_top[1:],
color='k', alpha=.1, zorder=5, label=f'{alpha*100:.0f}% confidence interval'
)
ax.axhline(0, color='k', lw=1, ls='--', zorder=10)
ax.legend()
ax.set_title('Autocorrelation plot')
ax.set_xlabel('Lag')
ax.set_ylabel('Correlation coefficient')
annot = ax.annotate(
'', xy=(0, 0),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
0, 0,
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=25
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Lag : {np.arange(nlags+1)[n]:d}\n'
f'Correlation : {acorr[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
else:
return np.arange(nlags+1), acorr, ci_low, ci_top
elif method == 'lag plot':
plot = kwargs.pop('plot', True)
lag = kwargs.pop('lag', 1)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
if lag == 0:
x = self.extremes[self.column].values
else:
x = self.extremes[self.column].values[:-lag]
y = self.extremes[self.column].values[lag:]
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
x, y, marker='o', facecolor='k', s=40, edgecolors='white', lw=1, zorder=5
)
ax.set_xlabel(f'{self.column} i')
ax.set_ylabel(f'{self.column} i+{lag}')
ax.set_title('Extreme Values Lag Plot')
annotation = ax.annotate(
"", xy=(0, 0), xytext=(10, 10), textcoords="offset points",
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
np.nanmean(x), np.nanmean(y),
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annotation.set_visible(False)
def update_annotation(ind):
pos = points.get_offsets()[ind['ind'][0]]
annotation.xy = pos
point.set_offsets(pos)
text = "{}".format(" ".join(
[
f'{self.extremes.index[n]} : {ind["ind"][0]}\n'
f'{self.extremes.index[n+lag]} : {ind["ind"][0]+lag}'
for n in ind['ind']
]))
annotation.set_text(text)
def hover(event):
vis = annotation.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annotation(ind)
annotation.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annotation.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
else:
return x, y
elif method == 'runs test':
alpha = kwargs.pop('alpha', .05)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Calculate number of runs of shifted series
s = self.extremes[self.column].values - np.quantile(self.extremes[self.column].values, .5)
n_plus = np.sum(s > 0)
n_minus = np.sum(s < 0)
n_runs = 1
for i in range(1, len(s)):
# Change of sign
if s[i] * s[i-1] < 0:
n_runs += 1
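            # Wald-Wolfowitz runs test: under Ho the number of runs is approximately normal with
            # mean mu = 2 * n_plus * n_minus / n + 1 and variance (mu - 1) * (mu - 2) / (n - 1)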
mean = 2 * n_plus * n_minus / len(s) + 1
variance = (mean - 1) * (mean - 2) / (len(s) - 1)
test_statistic = (n_runs-mean)/np.sqrt(variance)
return str(
f'Ho : data is random\n'
f'Ha : data is not random\n\n'
f'Test statistic : N = {test_statistic:.2f}\n'
                f'Significance level : alpha = {alpha}\n'
f'Critical value : Nalpha = {scipy.stats.norm.ppf(1 - alpha / 2):.2f}\n'
f'Reject Ho if |N| > Nalpha'
)
else:
            raise ValueError(f'Method {method} not recognized. Try: autocorrelation, lag plot, or runs test')
def fit(self, distribution_name, fit_method='MLE', **kwargs):
"""
Depending on fit method, either creates a tuple with maximum likelihood estimate (MLE)
or an array with samples drawn from posterior distribution of parameters (MCMC).
Parameters
----------
distribution_name : str
Scipy distribution name (see https://docs.scipy.org/doc/scipy/reference/stats.html).
fit_method : str, optional
            Fit method - 'MLE' (Maximum Likelihood Estimate, scipy)
            or 'MCMC' (Markov chain Monte Carlo, emcee) (default='MLE').
kwargs:
for MLE:
scipy_fit_options : dict, optional
Special scipy fit options like <fc>, <loc>, or <floc>.
For GPD scipy_fit_options=dict(floc=0) by default (fixed location parameter at 0).
This parameter is carried over to further calculations, such as confidence interval.
for MCMC:
nsamples : int, optional
Number of samples each walker draws (default=1000).
Larger values result in longer processing time, but can lead to better convergence.
nwalkers : int, optional
Number of walkers (default=200). Each walker explores the parameter space.
Larger values result in longer processing time,
but more parameter space is explored (higher chance to escape local maxima).
log_prior : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns sum of log-probabilities (logpdf) for each parameter within theta.
By default is uniform for each parameter.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
log_likelihood : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns log-likelihood (sum of logpdf) for given parameters.
By default is sum(logpdf) of scipy distribution with <distribution_name>.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
starting_bubble : float, optional
Radius of bubble from <starting_position> within which
starting parameters for each walker are set (default=1e-2).
starting_position : array_like, optional
Array with starting parameters for each walker (default=None).
If None, then zeroes are chosen as starting parameter.
fixed_parameters : array_like, optional
An array with tuples with index of parameter being fixed "i" and parameter value "v" [(i, v),...]
for each parameter being fixed (default [(1,0)] for GPD, None for other).
Works only with custom distributions. Must be sorted in ascending order by "i".
"""
# Make sure extreme values have been extracted
if not self.__status['extremes']:
raise RuntimeError('Extreme values have not been extracted. Nothing to fit')
# Update internal status
self.__status = dict(
extremes=True,
fit=False,
results=False
)
self.__update()
if fit_method == 'MLE':
if distribution_name == 'genpareto':
self.scipy_fit_options = kwargs.pop('scipy_fit_options', dict(floc=0))
else:
self.scipy_fit_options = kwargs.pop('scipy_fit_options', {})
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Create local distribution object
distribution_object = getattr(scipy.stats, distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
self.fit_parameters = distribution_object.fit(exceedances, **self.scipy_fit_options)
elif fit_method == 'MCMC':
self.mcmc_chain = self.__run_mcmc(distribution_name, **kwargs)
else:
raise ValueError(f'Fit method {fit_method} not recognized')
# On successful fit assign the fit_ variables
self.fit_method = fit_method
self.distribution_name = distribution_name
# Update internal status
self.__status = dict(
extremes=True,
fit=True,
results=False
)
self.__update()
def __run_mcmc(self, distribution_name, nsamples=1000, nwalkers=200, **kwargs):
"""
Runs the emcee Ensemble Sampler to sample the posterior probability of fit parameters given the observed data.
Returns sampler chain with <nsamples> for each parameter for each <nwalkers>.
See http://dfm.io/emcee/current/
Parameters
----------
distribution_name : str
Scipy distribution name (see https://docs.scipy.org/doc/scipy/reference/stats.html).
nsamples : int, optional
Number of samples each walker draws (default=1000).
Larger values result in longer processing time, but can lead to better convergence.
nwalkers : int, optional
Number of walkers (default=200). Each walker explores the parameter space.
Larger values result in longer processing time,
but more parameter space is explored (higher chance to escape local maxima).
kwargs
log_prior : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns sum of log-probabilities (logpdf) for each parameter within theta.
By default is uniform for each parameter.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
log_likelihood : callable, optional
Function taking one parameter - list with fit parameters (theta).
Returns log-likelihood (sum of logpdf) for given parameters.
By default is sum(logpdf) of scipy distribution with <distribution_name>.
read http://dfm.io/emcee/current/user/line/
Default functions are defined only for 3-parameter GEV and 3- and 2-parameter (loc=0) GPD.
starting_bubble : float, optional
Radius of bubble from <starting_position> within which
starting parameters for each walker are set (default=1e-2).
starting_position : array_like, optional
Array with starting parameters for each walker (default=None).
If None, then zeroes are chosen as starting parameter.
fixed_parameters : array_like, optional
An array with tuples with index of parameter being fixed "i" and parameter value "v" [(i, v),...]
for each parameter being fixed (default [(1,0)] for GPD, None for other).
Works only with custom distributions. Must be sorted in ascending order by "i".
Returns
-------
np.ndarray
Ensemble Sampler chain with <nsamples> for each parameter for each of the <nwalkers> walkers
(assigned to self.mcmc_chain by the fit method).
"""
log_prior = kwargs.pop('log_prior', None)
log_likelihood = kwargs.pop('log_likelihood', None)
starting_bubble = kwargs.pop('starting_bubble', 1e-2)
starting_position = kwargs.pop('starting_position', None)
if distribution_name == 'genpareto':
self.fixed_parameters = kwargs.pop('fixed_parameters', [(1, 0)])
else:
self.fixed_parameters = kwargs.pop('fixed_parameters', None)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
if self.fixed_parameters == [(1, 0)] and distribution_name == 'genpareto':
pass
else:
if self.fixed_parameters is not None:
if (log_prior is None) or (log_likelihood is None) or (starting_position is None):
raise ValueError(
'<fixed_parameters> only works with custom prior and likelihood functions.\n'
'Starting position should be provided for the fixed_parameters case'
)
distribution_object = getattr(scipy.stats, distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
# Define log_prior probability function (uniform by default)
if log_prior is None:
if distribution_name == 'genpareto':
# https://en.wikipedia.org/wiki/Generalized_Pareto_distribution
if self.fixed_parameters == [(1, 0)]:
def log_prior(theta):
shape, scale = theta
if scale <= 0:
return -np.inf
return 0
else:
def log_prior(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint
if shape >= 0:
condition = np.all(exceedances >= loc)
else:
condition = np.all(exceedances >= loc) and np.all(exceedances <= loc - scale / shape)
if condition:
return 0
else:
return -np.inf
elif distribution_name == 'genextreme':
# https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution
def log_prior(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint (scipy shape has inverted sign)
shape *= -1
if shape > 0:
condition = np.all(exceedances >= loc - scale / shape)
elif shape == 0:
condition = True
else:
condition = np.all(exceedances <= loc - scale / shape)
if condition:
return 0
else:
return -np.inf
else:
raise NotImplementedError(
f'Log-prior function is not implemented for {distribution_name} parameters.\n'
f'Define manually and pass to <log_prior=>.'
)
# Define log_likelihood function
if log_likelihood is None:
if distribution_name == 'genpareto':
# https://en.wikipedia.org/wiki/Generalized_Pareto_distribution
if self.fixed_parameters == [(1, 0)]:
def log_likelihood(theta):
shape, scale = theta
if scale <= 0:
return -np.inf
return np.sum(distribution_object.logpdf(exceedances, shape, 0, scale))
else:
def log_likelihood(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint
if shape >= 0:
condition = np.all(exceedances >= loc)
else:
condition = np.all(exceedances >= loc) and np.all(exceedances <= loc - scale / shape)
if condition:
return np.sum(distribution_object.logpdf(exceedances, *theta))
else:
return -np.inf
elif distribution_name == 'genextreme':
# https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution
def log_likelihood(theta):
shape, loc, scale = theta
# Parameter constraint
if scale <= 0:
return -np.inf
# Support constraint (scipy shape has inverted sign)
shape *= -1
if shape > 0:
condition = np.all(exceedances >= loc - scale / shape)
elif shape == 0:
condition = True
else:
condition = np.all(exceedances <= loc - scale / shape)
if condition:
return np.sum(distribution_object.logpdf(exceedances, *theta))
else:
return -np.inf
else:
raise NotImplementedError(
f'Log-likelihood function is not implemented for {distribution_name} parameters.\n'
f'Define manually and pass to <log_likelihood=>.'
)
# Define log_posterior probability function (not exact - excludes marginal evidence probability)
def log_posterior(theta):
return log_likelihood(theta) + log_prior(theta)
# Set MCMC walkers' starting positions to 0
# (setting to MLE makes algorithm unstable due to being stuck in local maxima)
if starting_position is None:
if distribution_name == 'genpareto' and self.fixed_parameters == [(1, 0)]:
theta_0 = np.array([0, 0])
elif distribution_name in ['genextreme', 'genpareto']:
theta_0 = np.array([0, 0, 0])
else:
theta_0 = distribution_object.fit(exceedances)
starting_position = [[0] * len(theta_0) for _ in range(nwalkers)]
# Randomize starting positions to force walkers explore the parameter space
starting_position = [
np.array(sp) + starting_bubble * np.random.randn(len(starting_position[0]))
for sp in starting_position
]
if len(starting_position) != nwalkers:
raise ValueError(f'Number of starting positions {len(starting_position)} '
f'must be equal to number of walkers {nwalkers}')
ndim = len(starting_position[0])
# Setup the Ensemble Sampler and draw samples from posterior distribution for specified number of walkers
self.__sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior)
self.__sampler.run_mcmc(starting_position, nsamples)
# Fill in fixed parameter values
sampler_chain = self.__sampler.chain.copy()
if self.fixed_parameters is not None:
fp = np.transpose(self.fixed_parameters)
ndim = sampler_chain.shape[-1] + len(self.fixed_parameters)
mcmc_chain = np.array(
[
[
[np.nan] * ndim for _ in range(sampler_chain.shape[1])
] for _ in range(sampler_chain.shape[0])
]
)
for i in range(mcmc_chain.shape[0]):
for j in range(mcmc_chain.shape[1]):
counter = 0
for k in range(mcmc_chain.shape[2]):
if k in fp[0]:
mcmc_chain[i][j][k] = fp[1][fp[0] == k][0]
else:
mcmc_chain[i][j][k] = sampler_chain[i][j][counter]
counter += 1
sampler_chain = np.array(mcmc_chain)
return sampler_chain
def _kernel_fit_parameters(self, burn_in, kernel_steps=1000):
"""
Estimate mode of each parameter as peaks of gaussian kernel.
Parameters
----------
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Returns
-------
np.ndarray
Modes of parameters.
"""
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if self.fit_method != 'MCMC':
raise ValueError('Fit method must be MCMC')
# Load samples
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
# Estimate mode of each parameter as peaks of gaussian kernel.
parameters = []
for i, p in enumerate(samples.T):
if self.fixed_parameters is None or (i not in np.transpose(self.fixed_parameters)[0]):
p_filtered = p[~np.isnan(p)]
kernel = scipy.stats.gaussian_kde(p_filtered)
support = np.linspace(
np.quantile(p_filtered, .1), np.quantile(p_filtered, .9),
kernel_steps
)
density = kernel.evaluate(support)
parameters.append(support[density.argmax()])
else:
parameters.append(p[0])
return np.array(parameters)
def plot_trace(self, burn_in, true_theta=None, labels=None):
"""
Plots traces for each parameter. Each trace plot shows all samples for each walker
after first <burn_in> samples are discarded. This method is used to verify fit stability
and to determine the optimal <burn_in> value.
Parameters
----------
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
true_theta : array_like, optional
Array with true (known) values of parameters (default=None). If given, are shown on trace plots.
labels : array_like, optional
List of labels for each parameter (e.g. shape, loc, scale) (default - index).
Returns
-------
tuple(fig, axes)
"""
# Make sure self.mcmc_chain exists
if self.mcmc_chain is None:
raise RuntimeError('No mcmc_chain attribute found.')
if labels is None:
labels = [f'Parameter {i+1}' for i in range(self.__sampler.chain.shape[-1])]
# Generate trace plot
ndim = self.__sampler.chain.shape[-1]
with plt.style.context('bmh'):
fig, axes = plt.subplots(ndim, 1, figsize=(12, 8), sharex='all')
if ndim == 1:
axes.set_title('MCMC Trace Plot')
axes.set_xlabel('Sample number')
else:
axes[0].set_title('MCMC Trace Plot')
axes[-1].set_xlabel('Sample number')
for i in range(ndim):
for swalker in self.__sampler.chain:
if ndim == 1:
axes.plot(
np.arange(len(swalker.T[i]))[burn_in:],
swalker.T[i][burn_in:],
color='k', lw=0.1, zorder=5
)
axes.set_ylabel(labels[i])
else:
axes[i].plot(
np.arange(len(swalker.T[i]))[burn_in:],
swalker.T[i][burn_in:],
color='k', lw=0.1, zorder=5
)
axes[i].set_ylabel(labels[i])
if true_theta is not None:
if ndim == 1:
axes.axhline(true_theta[i], color='orangered', lw=2, zorder=10)
else:
axes[i].axhline(true_theta[i], color='orangered', lw=2, zorder=10)
fig.tight_layout()
return fig, axes
def plot_corner(self, burn_in, bins=100, labels=None, figsize=(12, 12), **kwargs):
"""
Generate corner plot showing the projections of a data set in a multi-dimensional space.
See https://corner.readthedocs.io/en/latest/api.html#corner.corner
Parameters
----------
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
bins : int, optional
See https://corner.readthedocs.io/en/latest/api.html#corner.corner (default=100).
labels : array_like, optional
List of labels for each parameter (e.g. shape, loc, scale) (default - index).
figsize : tuple, optional
Figure size (default=(12, 12)).
kwargs
Corner plot keywords. See https://corner.readthedocs.io/en/latest/api.html#corner.corner
Returns
-------
tuple(fig, ax)
"""
# Make sure self.mcmc_chain exists
if self.mcmc_chain is None:
raise RuntimeError('mcmc_chain attribute not found')
# Generate labels
ndim = self.__sampler.chain.shape[-1]
if labels is None:
labels = np.array([f'Parameter {i + 1}' for i in range(ndim)])
samples = self.__sampler.chain[:, burn_in:, :].reshape((-1, ndim)).copy()
# Generate corner plot
fig, ax = plt.subplots(ndim, ndim, figsize=figsize)
fig = corner.corner(samples, bins=bins, labels=labels, fig=fig, **kwargs)
return fig, ax
def plot_posterior(self, rp, burn_in, alpha=.95, plot=True, kernel_steps=1000, bins=100):
"""
Returns posterior distribution of return value for a specific return period.
Can be used to explore the posterior distribution p(rv|self.extremes).
Parameters
----------
rp : float
Return period (1/rp represents probability of exceedance over self.block_size).
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
alpha : float, optional
Shows confidence bounds for given interval alpha (default=.95). Doesn't show if None.
plot : bool, optional
If True, plots histogram of return value (default=True). If False, return data
kernel_steps : int, optional
Number of bins (kernel support points) used to plot kernel density (default=1000).
bins : int, optional
Number of bins in the histogram (default=100). Only used when plot=True.
Returns
-------
Distribution of return value for a given return period
if plot = True : tuple(fig, ax)
if plot = False : np.ndarray
"""
# Make sure self.mcmc_chain exists
if self.mcmc_chain is None:
raise RuntimeError('No mcmc_chain attribute found.')
if not np.isscalar(rp):
raise ValueError('rp must be scalar')
distribution_object = getattr(scipy.stats, self.distribution_name)
# Calculate return value for each fit parameters sample
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
if self.extremes_type == 'high':
return_values = np.array(
[
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
else:
return_values = np.array(
[
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
# Set up gaussian kernel
support = np.linspace(return_values.min(), return_values.max(), kernel_steps)
kernel = scipy.stats.gaussian_kde(return_values)
density = kernel.evaluate(support)
if plot:
with plt.style.context('bmh'):
fig, ax = plt.subplots(figsize=(12, 8))
ax.hist(
return_values, bins=bins, density=True,
color='k', rwidth=.9, alpha=0.2, zorder=5
)
ax.hist(
return_values, bins=bins, density=True,
color='k', rwidth=.9, edgecolor='k', facecolor='None', lw=.5, ls='--', zorder=10
)
ax.plot(
support, density,
color='k', lw=2, zorder=15
)
if alpha is not None:
ax.axvline(np.nanquantile(return_values, (1 - alpha) / 2), lw=1, color='k', ls='--')
ax.axvline(np.nanquantile(return_values, (1 + alpha) / 2), lw=1, color='k', ls='--')
if self.extremes_type == 'high':
ax.set_xlim(right=np.nanquantile(return_values, .999))
else:
ax.set_xlim(left=np.nanquantile(return_values, .001))
ax.set_title(f'{rp}-year Return Period Posterior Distribution')
ax.set_xlabel('Return value')
ax.set_ylabel('Probability density')
fig.tight_layout()
return fig, ax
else:
return return_values
def return_value(self, rp, **kwargs):
"""
Calculates return values for given return periods.
Parameters
----------
rp : float or array_like
Return periods (1/rp represents probability of exceedance over self.block_size).
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
float or array of floats
Return values for given return periods.
"""
return self.isf(1 / rp / self.extremes_rate, **kwargs)
def confidence_interval(self, rp, alpha=.95, **kwargs):
"""
Estimates confidence intervals for given return periods.
Parameters
----------
rp : float or array_like, optional
Return periods (1/rp represents probability of exceedance over self.block_size).
alpha : float, optional
Confidence interval bounds (default=.95).
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
if fit is MLE
method : str, optional
Confidence interval estimation method (default='Monte Carlo').
Supported methods:
'Monte Carlo' - performs many random simulations to estimate return value distribution
'Delta' - delta method (assumption of asymptotic normality, fast but inaccurate)
Implemented only for specific distributions
'Profile Likelihood' - not yet implemented
if method is Monte Carlo
k : int, optional
Number of Monte Carlo simulations (default=1e4). Larger values result in slower simulation.
sampling_method : str, optional
Sampling method (default='constant'):
'constant' - number of extremes in each sample is constant and equal to len(self.extremes)
'poisson' - number of extremes is Poisson-distributed
'jacknife' - aka drop-one-out, works only when <source=data>
source : str, optional
Specifies where new data is sampled from (default='data'):
'data' - samples with replacement directly from extracted extreme values
'parametric' - samples from distribution with previously estimated (MLE) parameters
assume_normality : bool, optional
If True, assumes return values are normally distributed.
If False, estimates quantiles directly (default=False).
if method is Delta
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10' for GPD and GEV, '1e-6' for others).
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
tuple of np.ndarray objects
Tuple with arrays with confidence intervals (lower, upper).
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method before generating confidence intervals')
if self.fit_method == 'MLE':
method = kwargs.pop('method', 'Monte Carlo')
if method == 'Monte Carlo':
return self.__monte_carlo(rp=rp, alpha=alpha, **kwargs)
elif method == 'Delta':
return self.__delta(rp=rp, alpha=alpha, **kwargs)
elif method in ['Profile Likelihood']:
# TODO - implement Profile Likelihood method
raise NotImplementedError(f'Method {method} not implemented')
else:
raise ValueError(f'Method {method} not recognized')
elif self.fit_method == 'MCMC':
burn_in = kwargs.pop('burn_in')
alpha = kwargs.pop('alpha', alpha)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
distribution_object = getattr(scipy.stats, self.distribution_name)
# Calculate return values for each fit parameters sample
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
if self.extremes_type == 'high':
return_values = np.array(
[
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
else:
return_values = np.array(
[
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *theta
) for theta in samples
]
)
# Calculate quantiles for lower and upper confidence bounds for each return period
if np.isscalar(rp):
return (
np.nanquantile(a=return_values.flatten(), q=(1 - alpha) / 2),
np.nanquantile(a=return_values.flatten(), q=(1 + alpha) / 2)
)
else:
return np.array(
[
[np.nanquantile(a=row, q=(1 - alpha) / 2) for row in return_values.T],
[np.nanquantile(a=row, q=(1 + alpha) / 2) for row in return_values.T]
]
)
else:
raise RuntimeError(f'Unknown fit_method {self.fit_method} encountered')
def __monte_carlo(self, rp, alpha=.95, **kwargs):
"""
Runs the Monte Carlo confidence interval estimation method.
Parameters
----------
rp : float or array_like
Return periods (1/rp represents probability of exceedance over self.block_size).
alpha : float, optional
Confidence interval bounds (default=.95).
kwargs
k : int, optional
Number of Monte Carlo simulations (default=1e4). Larger values result in slower simulation.
sampling_method : str, optional
Sampling method (default='constant'):
'constant' - number of extremes in each sample is constant and equal to len(self.extremes)
'poisson' - number of extremes is Poisson-distributed
'jacknife' - aka drop-one-out, works only when <source=data>
source : str, optional
Specifies where new data is sampled from (default='data'):
'data' - samples with replacement directly from extracted extreme values
'parametric' - samples from distribution with previously estimated (MLE) parameters
assume_normality : bool, optional
If True, assumes return values are normally distributed.
If False, estimates quantiles directly (default=False).
Returns
-------
tuple of np.ndarray objects
Tuple with arrays with confidence intervals (lower, upper).
"""
k = kwargs.pop('k', 1e4)
sampling_method = kwargs.pop('sampling_method', 'constant')
source = kwargs.pop('source', 'data')
assume_normality = kwargs.pop('assume_normality', False)
# TODO - implement a discard rule (discard bad samples)
# discard_rule = kwargs.pop('discard_rule', None)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
distribution_object = getattr(scipy.stats, self.distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
if self.extremes_type == 'low':
exceedances *= -1
# Sample from data case
if source == 'data':
if sampling_method == 'constant':
sample_size = len(self.extremes)
return_values = []
while len(return_values) < k:
sample = np.random.choice(a=exceedances, size=sample_size, replace=True)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *sample_fit_parameters
)
)
elif sampling_method == 'poisson':
return_values = []
while len(return_values) < k:
sample_size = scipy.stats.poisson.rvs(mu=len(self.extremes), loc=0, size=1)
sample_rate = sample_size / self.number_of_blocks
sample = np.random.choice(a=exceedances, size=sample_size, replace=True)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
elif sampling_method == 'jacknife':
sample_rate = (len(self.extremes) - 1) / self.number_of_blocks
return_values = []
for i in range(len(self.extremes)):
sample = np.delete(arr=exceedances, obj=i)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
raise ValueError(f'for <source=data> the sampling method must be <constant>, <poisson>, or <jacknife>,'
f' <{sampling_method}> was passed')
# Sample from distribution (parametric) case
elif source == 'parametric':
if sampling_method == 'constant':
sample_size = len(self.extremes)
return_values = []
while len(return_values) < k:
sample = distribution_object.rvs(*self.fit_parameters, size=sample_size)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / self.extremes_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / self.extremes_rate, *sample_fit_parameters
)
)
elif sampling_method == 'poisson':
return_values = []
while len(return_values) < k:
sample_size = scipy.stats.poisson.rvs(mu=len(self.extremes), loc=0, size=1)
sample_rate = sample_size / self.number_of_blocks
sample = distribution_object.rvs(*self.fit_parameters, size=sample_size)
sample_fit_parameters = distribution_object.fit(sample, **self.scipy_fit_options)
if self.extremes_type == 'high':
return_values.append(
self.threshold + distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
return_values.append(
self.threshold - distribution_object.isf(
1 / rp / sample_rate, *sample_fit_parameters
)
)
else:
raise ValueError(f'for <source=parametric> the sampling method must be <constant> or <poisson>,'
f' <{sampling_method}> was passed')
else:
raise ValueError(f'source must be either <data> or <parametric>, <{source}> was passed')
# Estimate confidence bounds for sampled return values
return_values = np.array(return_values)
if np.isscalar(rp):
if assume_normality:
return scipy.stats.norm.interval(
alpha=alpha, loc=np.nanmean(return_values), scale=np.nanstd(return_values, ddof=1)
)
else:
return (
np.nanquantile(a=return_values.flatten(), q=(1 - alpha) / 2),
np.nanquantile(a=return_values.flatten(), q=(1 + alpha) / 2)
)
else:
if assume_normality:
locations = np.array([np.nanmean(row) for row in return_values.T])
scales = np.array([np.nanstd(row, ddof=1) for row in return_values.T])
return np.transpose(
[
scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=scale)
for loc, scale in zip(locations, scales)
]
)
else:
return np.array(
[
[np.nanquantile(a=row, q=(1 - alpha) / 2) for row in return_values.T],
[np.nanquantile(a=row, q=(1 + alpha) / 2) for row in return_values.T]
]
)
def __delta(self, rp, alpha=.95, **kwargs):
"""
Estimates confidence intervals using the delta method. Assumes asymptotic normality.
Parameters
----------
rp : float or array_like
Return periods (1/rp represents probability of exceedance over self.block_size).
alpha : float, optional
Confidence interval bounds (default=.95).
kwargs
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10').
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
tuple of np.ndarray objects
Tuple with arrays with confidence intervals (lower, upper).
"""
dx = kwargs.pop('dx', '1e-10')
precision = kwargs.pop('precision', 100)
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method before generating confidence intervals')
# Check if a custom distribution with mpmath backend is defined
if self.distribution_name in coastlib.stats.distributions.distributions:
distribution_object = getattr(coastlib.stats.distributions, self.distribution_name)
else:
raise ValueError(f'Delta method is not implemented for {self.distribution_name} distribution')
# Account for custom fit parameters (custom genextreme has negative shape in scipy)
if self.distribution_name == 'genextreme':
fit_parameters = self.fit_parameters * np.array([-1, 1, 1])
elif self.distribution_name in ['genpareto']:
fit_parameters = self.fit_parameters
else:
raise ValueError(f'Delta method is not implemented for {self.distribution_name} distribution')
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
# Generalized Pareto Distribution
if self.distribution_name == 'genpareto':
if self.scipy_fit_options != dict(floc=0):
raise ValueError(
f'Delta method for genpareto is implemented only for the case of '
f'fixed location parameter {dict(floc=0)}, '
f'{self.scipy_fit_options} does not satisfy this criterion'
)
with mpmath.workdps(precision):
# Define modified log_likelihood function (only shape and scale, location is fixed)
def log_likelihood(*theta):
return mpmath.fsum(
[
mpmath.log(
coastlib.stats.distributions.genpareto.pdf(
x=x, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
) for x in exceedances
]
)
# Calculate covariance matrix of shape and scale
observed_information = -coastlib.math.derivatives.hessian(
func=log_likelihood, n=2, dx=dx, precision=precision,
coordinates=(fit_parameters[0], fit_parameters[2])
).astype(np.float64)
covariance = np.linalg.inv(observed_information)
# Modify covariance matrix to include uncertainty in threshold exceedance probability
modified_covariance = np.zeros((3, 3))
modified_covariance[1:, 1:] = covariance
# Probability of exceeding threshold for all observations
eta_0 = len(self.extremes) / len(self.dataframe)
# Number of observations per year
ny = len(self.dataframe) / self.number_of_blocks
modified_covariance[0][0] = eta_0 * (1 - eta_0) / len(self.dataframe)
if np.isscalar(rp):
# Define scalar function as a function which takes arbitrary fit parameters and returns return values
def scalar_function(eta, *theta):
q = 1 / (rp * ny * eta)
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
else:
return self.threshold - distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=3, dx=dx, precision=precision,
coordinates=(eta_0, fit_parameters[0], fit_parameters[2])
)
loc = np.float64(
scalar_function(eta_0, fit_parameters[0], fit_parameters[2])
)
variance = np.dot(
np.dot(delta_scalar.T, modified_covariance), delta_scalar
).flatten().astype(np.float64)[0]
return scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
else:
locs, variances = [], []
for _rp in rp:
# Define scalar function as a function which takes arbitrary fit parameters
# and returns return values
def scalar_function(eta, *theta):
q = 1 / (_rp * ny * eta)
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
else:
return self.threshold - distribution_object.isf(
q=q, shape=theta[0], loc=fit_parameters[1], scale=theta[1]
)
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=3, dx=dx, precision=precision,
coordinates=(eta_0, fit_parameters[0], fit_parameters[2]),
)
locs.append(
np.float64(
scalar_function(eta_0, fit_parameters[0], fit_parameters[2])
)
)
variances.append(
np.dot(
np.dot(delta_scalar.T, modified_covariance), delta_scalar
).flatten().astype(np.float64)[0]
)
return np.array(
[
scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
for loc, variance in zip(locs, variances)
]
).T
# Generalized Extreme Value Distribution
elif self.distribution_name == 'genextreme':
if self.scipy_fit_options != {}:
raise ValueError(
f'Delta method for genextreme is implemented only for the case of '
f'no fixed parameters {dict()}, '
f'{self.scipy_fit_options} does not satisfy this criterion'
)
# Calculate observed information matrix (negative hessian of log_likelihood)
observed_information = distribution_object.observed_information(
exceedances, *fit_parameters, dx=dx, precision=precision
).astype(np.float64)
if np.isscalar(rp):
# Define scalar function as a function which takes arbitrary fit parameters and returns return values
def scalar_function(*theta):
q = 1 / rp / self.extremes_rate
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(q, *theta)
else:
return self.threshold - distribution_object.isf(q, *theta)
# Calculate delta (gradient) of scalar_function
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=len(fit_parameters),
coordinates=fit_parameters, dx=dx, precision=precision
).astype(np.float64)
# Calculate location and scale (gaussian mean and sigma)
loc = np.float64(scalar_function(*fit_parameters))
variance = np.dot(
np.dot(delta_scalar.T, np.linalg.inv(observed_information)), delta_scalar
).flatten()[0]
return scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
else:
locs, variances = [], []
for _rp in rp:
# Define scalar function as a function which takes arbitrary fit parameters
# and returns return values
def scalar_function(*theta):
q = 1 / _rp / self.extremes_rate
if q <= 0 or q >= 1:
return np.nan
if self.extremes_type == 'high':
return self.threshold + distribution_object.isf(q, *theta)
else:
return self.threshold - distribution_object.isf(q, *theta)
# Calculate delta (gradient) of scalar_function
delta_scalar = coastlib.math.derivatives.gradient(
func=scalar_function, n=len(fit_parameters),
coordinates=fit_parameters, dx=dx, precision=precision
).astype(np.float64)
# Calculate location and scale (gaussian mean and sigma)
locs.append(np.float64(scalar_function(*fit_parameters)))
variances.append(
np.dot(
np.dot(delta_scalar.T, np.linalg.inv(observed_information)), delta_scalar
).flatten()[0]
)
return np.array(
[
scipy.stats.norm.interval(alpha=alpha, loc=loc, scale=np.sqrt(variance))
for loc, variance in zip(locs, variances)
]
).T
def generate_results(self, rp=None, alpha=.95, **kwargs):
"""
Generates a self.results dataframe with return values and, optionally, confidence intervals.
Used to generate data for output and reporting purposes (e.g. via self.results.to_excel()) and to
produce a probability plot (summary).
Parameters
----------
rp : float or array_like, optional
Return periods (1/rp represents probability of exceedance over self.block_size).
By default is an array of return periods equally spaced on a log-scale from 0.001 to 1000.
alpha : float, optional
Confidence interval bounds (default=.95). Doesn't estimate confidence intervals if None.
kwargs
if fit is MCMC:
rv_kwargs : dict
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
ci_kwargs : dict
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
if fit is MLE
ci_kwargs
method : str, optional
Confidence interval estimation method (default='Monte Carlo').
Supported methods:
'Monte Carlo' - performs many random simulations to estimate return value distribution
'Delta' - delta method (assumption of asymptotic normality, fast but inaccurate)
Implemented only for specific distributions
'Profile Likelihood' - not yet implemented
if method is Monte Carlo
k : int, optional
Number of Monte Carlo simulations (default=1e4). Larger values result in slower simulation.
sampling_method : str, optional
Sampling method (default='constant'):
'constant' - number of extremes in each sample is constant and equal to
number of extracted extreme values
'poisson' - number of extremes is Poisson-distributed
'jacknife' - aka drop-one-out, works only when <source=data>
source : str, optional
Specifies where new data is sampled from (default='data'):
'data' - samples with replacement directly from extracted extreme values
'parametric' - samples from distribution with previously estimated (MLE) parameters
assume_normality : bool, optional
If True, assumes return values are normally distributed.
If False, estimates quantiles directly (default=False).
if method is Delta
dx : str, optional
String representing a float, which represents spacing at which partial derivatives
are estimated (default='1e-10' for GPD and GEV, '1e-6' for others).
precision : int, optional
Precision of floating point calculations (see mpmath library documentation) (default=100).
Derivative estimated with low <precision> value may have
a significant error due to rounding and under-/overflow.
Returns
-------
Creates a <self.results> dataframe with return values and, optionally, confidence intervals
for each given return period.
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if rp is None:
rp = np.unique(
np.append(
np.logspace(-3, 3, 200),
[1/12, 7/365.2425, 1, 2, 5, 10, 25, 50, 100, 200, 250, 500, 1000]
)
)
# Update internal status
self.__status = dict(
extremes=True,
fit=True,
results=False
)
self.__update()
if np.isscalar(rp):
rp = np.array([rp])
else:
rp = np.array(rp)
if self.fit_method == 'MLE':
rv_kwargs = kwargs.pop('rv_kwargs', {})
ci_kwargs = kwargs.pop('ci_kwargs', {})
else:
rv_kwargs = kwargs.pop('rv_kwargs')
ci_kwargs = kwargs.pop('ci_kwargs')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return_values = self.return_value(rp, **rv_kwargs)
self.results = pd.DataFrame(
data=return_values, index=rp, columns=['Return Value']
)
self.results.index.name = 'Return Period'
if alpha is not None:
ci_lower, ci_upper = self.confidence_interval(rp=rp, alpha=alpha, **ci_kwargs)
if np.isscalar(ci_lower):
ci_lower, ci_upper = np.array([ci_lower]), np.array([ci_upper])
else:
ci_lower, ci_upper = np.array(ci_lower), np.array(ci_upper)
self.results[f'{alpha*100:.0f}% CI Lower'] = ci_lower
self.results[f'{alpha*100:.0f}% CI Upper'] = ci_upper
# Remove bad values from the results
if self.extremes_type == 'high':
mask = self.results['Return Value'].values >= self.extremes[self.column].values.min()
else:
mask = self.results['Return Value'].values <= self.extremes[self.column].values.max()
self.results = self.results[mask]
# Update internal status
self.__status = dict(
extremes=True,
fit=True,
results=True
)
self.__update()
def pdf(self, x, **kwargs):
"""
Estimates probability density at value <x> using the fitted distribution.
Parameters
----------
x : float or iterable
Values at which the probability density is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either an estimate or an array of estimates of the probability density at <x>.
"""
if self.extremes_type == 'high':
return self.___get_property(x=x-self.threshold, prop='pdf', **kwargs)
else:
return self.___get_property(x=self.threshold-x, prop='pdf', **kwargs)
def cdf(self, x, **kwargs):
"""
Estimates cumulative probability at value <x> using the fitted distribution.
Parameters
----------
x : float or iterable
Values at which the cumulative probability density is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either an estimate or an array of estimates of the cumulative probability at <x>.
"""
if self.extremes_type == 'high':
return self.___get_property(x=x-self.threshold, prop='cdf', **kwargs)
else:
return self.___get_property(x=self.threshold-x, prop='cdf', **kwargs)
def ppf(self, q, **kwargs):
"""
Estimates the ppf (inverse cdf or quantile function) at quantile <q> using the fitted distribution.
Parameters
----------
q : float or iterable
Quantiles at which the ppf is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on q, either an estimate or an array of estimates of the ppf at <q>.
"""
if self.extremes_type == 'high':
return self.threshold + self.___get_property(x=q, prop='ppf', **kwargs)
else:
return self.threshold - self.___get_property(x=q, prop='ppf', **kwargs)
def isf(self, q, **kwargs):
"""
Estimates the isf (inverse survival or upper quantile function) at quantile <q> using the fitted distribution.
Parameters
----------
q : float or iterable
Quantiles at which the isf is estimated.
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on q, either an estimate or an array of estimates of the isf at <q>.
"""
if self.extremes_type == 'high':
return self.threshold + self.___get_property(x=q, prop='isf', **kwargs)
else:
return self.threshold - self.___get_property(x=q, prop='isf', **kwargs)
def ___get_property(self, x, prop, **kwargs):
"""
Estimates property (pdf, cdf, ppf, etc.) at value <x> using the fitted distribution parameters.
Parameters
----------
x : float or iterable
Value at which the property is estimated.
prop : str
Scipy property to be estimated (pdf, ppf, isf, cdf, rvs, etc.).
kwargs
if fit is MCMC
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
Depending on x, either an estimate or an array of estimates of the property at <x>.
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
distribution_object = getattr(scipy.stats, self.distribution_name)
property_function = getattr(distribution_object, prop)
if not np.isscalar(x):
x = np.array(x)
if self.fit_method == 'MLE':
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return property_function(x, *self.fit_parameters)
elif self.fit_method == 'MCMC':
burn_in = kwargs.pop('burn_in')
estimate_method = kwargs.pop('estimate_method', 'parameter mode')
if estimate_method not in ['parameter mode', 'value mode', 'value quantile']:
raise ValueError(f'Estimate method <{estimate_method}> not recognized')
if estimate_method in ['parameter mode', 'value mode']:
kernel_steps = kwargs.pop('kernel_steps', 1000)
else:
kernel_steps = None
if estimate_method == 'value quantile':
quantile = kwargs.pop('quantile', .5)
else:
quantile = None
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
# Estimate mode of each parameter as peaks of gaussian kernel.
# Use estimated parameters to calculate property function
if estimate_method == 'parameter mode':
parameters = self._kernel_fit_parameters(burn_in=burn_in, kernel_steps=kernel_steps)
return property_function(x, *parameters)
# Load samples
ndim = self.mcmc_chain.shape[-1]
samples = self.mcmc_chain[:, burn_in:, :].reshape((-1, ndim))
property_samples = np.array([property_function(x, *_theta) for _theta in samples])
# Estimate property function as mode of distribution of property value
# for all samples in self.mcmc_chain as peaks of gaussian kernel.
if estimate_method == 'value mode':
if np.isscalar(x):
if np.all(np.isnan(property_samples)):
return np.nan
else:
ps_filtered = property_samples[~np.isnan(property_samples)]
if np.all(ps_filtered == ps_filtered[0]):
return np.nan
else:
kernel = scipy.stats.gaussian_kde(ps_filtered)
support = np.linspace(ps_filtered.min(), ps_filtered.max(), kernel_steps)
density = kernel.evaluate(support)
return support[density.argmax()]
else:
estimates = []
for ps in property_samples.T:
if np.all(np.isnan(ps)):
estimates.append(np.nan)
else:
ps_filtered = ps[~np.isnan(ps)]
if np.all(ps_filtered == ps_filtered[0]):
estimates.append(np.nan)
else:
kernel = scipy.stats.gaussian_kde(ps_filtered)
support = np.linspace(ps_filtered.min(), ps_filtered.max(), kernel_steps)
density = kernel.evaluate(support)
estimates.append(support[density.argmax()])
return np.array(estimates)
# Estimate property function as quantile of distribution of property value
# for all samples in self.mcmc_chain.
elif estimate_method == 'value quantile':
if np.isscalar(quantile):
if quantile <= 0 or quantile > 1:
raise ValueError(f'Quantile must be in range (0,1], quantile={quantile} was passed')
else:
raise ValueError(f'Quantile must be scalar, {type(quantile)} was passed')
if np.isscalar(x):
return np.nanquantile(a=property_samples, q=quantile)
else:
return np.array(
[
np.nanquantile(a=row, q=quantile) for row in property_samples.T
]
)
else:
raise RuntimeError(f'Unknown fit_method {self.fit_method} encountered')
def plot_summary(self, support=None, bins=10, plotting_position='Weibull', **kwargs):
"""
Plots projected return values, pdf, and cdf values against observed.
Parameters
----------
support : array_like, optional
Values used to estimate pdf and cdf. By default, 100 values linearly spaced from the minimum to the maximum extreme value.
bins : int, optional
Number of bins used to plot cdf and pdf histograms (default=10).
plotting_position : str, optional
Plotting position (default='Weibull'). Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
kwargs
if fit is MCMC:
rv_kwargs : dict
burn_in : int
Number of samples to discard. Samples, before the series converges, should be discarded.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
tuple(fig, ax1, ax2, ax3)
Figure, return value, pdf, cdf axes.
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['results']:
raise UnboundLocalError('No data found. Generate results by running the self.generate_results() method first')
if support is None:
support = np.linspace(
self.extremes[self.column].values.min(), self.extremes[self.column].values.max(), 100
)
if self.fit_method == 'MCMC':
rv_kwargs = kwargs.pop('rv_kwargs')
else:
rv_kwargs = {}
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return_period = self.__get_return_period(plotting_position=plotting_position)
with plt.style.context('bmh'):
# Setup canvas
fig = plt.figure(figsize=(12, 8))
ax1 = plt.subplot2grid((2, 2), (0, 0), colspan=2)
ax2 = plt.subplot2grid((2, 2), (1, 0))
ax3 = plt.subplot2grid((2, 2), (1, 1))
# Plot return values
ax1.set_title('Return Value Plot')
ax1.set_ylabel(f'{self.column}')
ax1.set_xlabel(f'Return period')
ax1.plot(
self.results.index, self.results['Return Value'].values,
color='k', lw=2, zorder=15, label='Central estimate'
)
if len(self.results.columns) == 3:
ax1.plot(
self.results.index, self.results[self.results.columns[1]].values,
ls='--', color='k', lw=.5, zorder=10
)
ax1.plot(
self.results.index, self.results[self.results.columns[2]].values,
ls='--', color='k', lw=.5, zorder=10
)
ax1.fill_between(
self.results.index, self.results[self.results.columns[1]],
self.results[self.results.columns[2]],
alpha=.1, color='k',
label=f'{self.results.columns[1].split("%")[0]}% confidence interval', zorder=5
)
points = ax1.scatter(
return_period, self.extremes[self.column].values,
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=15,
label=f'Observed extreme event\n{plotting_position} plotting position'
)
ax1.semilogx()
ax1.grid(b=True, which='minor', axis='x')
ax1.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%.0f'))
ax1.legend()
annot = ax1.annotate(
"", xy=(self.extremes['Return Period'].values.mean(), self.extremes[self.column].values.mean()),
xytext=(10, 10), textcoords="offset points",
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax1.scatter(
self.extremes['Return Period'].values.mean(), self.extremes[self.column].values.mean(),
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return Period : {return_period[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax1:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
# Plot PDF
ax2.set_ylabel('Probability density')
ax2.set_xlabel(f'{self.column}')
ax2.hist(
self.extremes[self.column].values, bins=bins, density=True,
color='k', rwidth=.9, alpha=0.2, zorder=5
)
ax2.hist(
self.extremes[self.column].values, bins=bins, density=True,
color='k', rwidth=.9, edgecolor='k', facecolor='None', lw=1, ls='--', zorder=10
)
ax2.plot(
support, self.pdf(support, **rv_kwargs),
color='k', lw=2, zorder=15
)
ax2.scatter(
self.extremes[self.column].values, [0] * len(self.extremes),
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=20
)
ax2.set_ylim(0)
# Plot CDF
ax3.set_ylabel('Cumulative probability')
ax3.set_xlabel(f'{self.column}')
if self.extremes_type == 'high':
ax3.hist(
self.extremes[self.column], bins=bins, density=True, cumulative=True,
color='k', rwidth=.9, alpha=0.2, zorder=5
)
ax3.hist(
self.extremes[self.column], bins=bins, density=True, cumulative=True,
color='k', rwidth=.9, edgecolor='k', facecolor='None', lw=1, ls='--', zorder=10
)
else:
_, boundaries = np.histogram(self.extremes[self.column].values, bins)
centers = np.array([(boundaries[i] + boundaries[i - 1]) / 2 for i in range(1, len(boundaries))])
densities = []
for i, c in enumerate(centers):
mask = self.extremes[self.column].values >= boundaries[i]
densities.append(np.sum(mask) / len(self.extremes))
ax3.bar(
centers, densities, width=.9*(boundaries[1]-boundaries[0]),
color='k', alpha=0.2, zorder=5
)
ax3.bar(
centers, densities, width=.9*(boundaries[1]-boundaries[0]),
color='k', edgecolor='k', facecolor='None', lw=1, ls='--', zorder=10
)
ax3.plot(
support, self.cdf(support, **rv_kwargs),
color='k', lw=2, zorder=15
)
ax3.scatter(
self.extremes[self.column].values, [0] * len(self.extremes),
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=20
)
ax3.set_ylim(0)
fig.tight_layout()
return fig, ax1, ax2, ax3
def plot_qq(self, k, plot=True, plotting_position='Weibull', quantiles=True, **kwargs):
"""
        Plots theoretical quantiles (probabilities) against observed quantiles (probabilities).
Parameters
----------
k : int
Number of estimated (non-fixed) parameters in the distribution.
plot : bool, optional
Generates plot if True, returns data if False (default=True).
plotting_position : str, optional
Plotting position (default='Weibull'). Has no effect on return value inference,
affects only some goodness of fit statistics and locations of observed extremes on the
return values plot.
quantiles : bool, optional
If True, produces a quantile plot (Q-Q, ppf) (default=True).
If False, produces a probability plot (P-P, cdf).
kwargs
if fit is MCMC:
rv_kwargs : dict
burn_in : int
                        Number of initial (burn-in) samples to discard, i.e. samples drawn before the series converges.
estimate_method : str, optional
'parameter mode' (default) - calculates value for parameters
estimated as mode (histogram peak, through gaussian kernel)
'value mode' - calculates values for each sample and then determines
value estimate as mode (histogram peak, through gaussian kernel)
'value quantile' - calculates values for each sample and then determines
value estimate as quantile of the value distribution
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
Only for 'parameter mode' and 'value mode' methods.
quantile : float, optional
Quantile for 'value quantile' method (default=.5, aka median).
Must be in the range (0, 1].
Returns
-------
if plot=True (default) : tuple(fig, ax)
if plot=False :
tuple((theoretical, observed), (r, p))
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if self.fit_method == 'MLE':
rv_kwargs = kwargs.pop('rv_kwargs', {})
else:
rv_kwargs = kwargs.pop('rv_kwargs')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
ecdf = self.__get_return_period(plotting_position=plotting_position, return_cdf=True)
return_periods = self.__get_return_period(plotting_position=plotting_position)
# Estimate theoretical values based on returned quantiles
if quantiles:
theoretical = self.ppf(ecdf, **rv_kwargs)
else:
theoretical = self.cdf(self.extremes[self.column].values, **rv_kwargs)
theoretical[np.isinf(theoretical)] = np.nan
mask = ~np.isnan(theoretical)
if quantiles:
r, p = scipy.stats.pearsonr(self.extremes[self.column].values[mask], theoretical[mask])
else:
r, p = scipy.stats.pearsonr(ecdf, theoretical[mask])
r = np.sqrt(
1 - (1 - r ** 2) * (len(theoretical[mask]) - 1) / (len(theoretical[mask]) - (k + 1))
)
if plot:
with plt.style.context('bmh'):
# Quantile plot
if quantiles:
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
theoretical, self.extremes[self.column].values,
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=10
)
lims = ax.get_xlim(), ax.get_ylim()
dlims = (-1e9, 1e9)
ax.plot(dlims, dlims, ls='--', lw=1, zorder=5, color='k')
ax.set_xlim(np.min(lims), np.max(lims))
ax.set_ylim(np.min(lims), np.max(lims))
ax.set_title(r'Quantile Plot')
plt.xlabel(r'Theoretical quantiles')
plt.ylabel(rf'Observed quantiles, {plotting_position} plotting position')
ax.text(
.05, .9, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes,
s=f'$\\bar{{R}}^2$={r**2:>.2f}\np={p:>.3f}', fontsize=14,
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25)
)
annot = ax.annotate(
'', xy=(theoretical[0], self.extremes[self.column].values[0]),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
theoretical[0]+self.threshold, self.extremes[self.column].values[0],
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return Period : {return_periods[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
# Probability plot
else:
fig, ax = plt.subplots(figsize=(12, 8))
points = ax.scatter(
theoretical, ecdf,
edgecolors='white', marker='o', facecolors='k', s=40, lw=1, zorder=10
)
lims = ax.get_xlim(), ax.get_ylim()
dlims = (-1e9, 1e9)
ax.plot(dlims, dlims, ls='--', lw=1, zorder=5, color='k')
ax.set_xlim(np.min(lims), np.max(lims))
ax.set_ylim(np.min(lims), np.max(lims))
ax.set_title(r'Probability Plot')
plt.xlabel(r'Theoretical probabilities')
plt.ylabel(rf'Observed probabilities, {plotting_position} plotting position')
ax.text(
.05, .9, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes,
s=f'$\\bar{{R}}^2$={r**2:>.2f}\np={p:>.3f}', fontsize=14,
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25)
)
annot = ax.annotate(
'', xy=(theoretical[0], self.extremes[self.column].values[0]),
xytext=(10, 10), textcoords='offset points',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='k', lw=1, zorder=25),
zorder=30
)
point = ax.scatter(
theoretical[0], self.extremes[self.column].values[0],
edgecolors='white', marker='o', facecolors='orangered', s=80, lw=1, zorder=20
)
point.set_visible(False)
annot.set_visible(False)
def update_annot(ind):
n = ind['ind'][0]
pos = points.get_offsets()[n]
annot.xy = pos
point.set_offsets(pos)
text = str(
f'Date : {self.extremes.index[n]}\n'
f'Value : {self.extremes[self.column].values[n]:.2f}\n'
f'Return Period : {return_periods[n]:.2f}'
)
annot.set_text(text)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = points.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
point.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
point.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', hover)
fig.tight_layout()
return fig, ax
else:
if quantiles:
return (
(theoretical, self.extremes[self.column].values),
(r, p)
)
else:
return (
(theoretical, ecdf),
(r, p)
)
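    # Illustrative usage sketch (not part of the original module). Assuming an EVA
    # instance `eva` whose distribution was fitted with fit_method='MCMC', a Q-Q plot
    # could be produced along these lines:
    #
    #     fig, ax = eva.plot_qq(
    #         k=2, plotting_position='Weibull', quantiles=True,
    #         rv_kwargs=dict(burn_in=200, estimate_method='parameter mode', kernel_steps=100)
    #     )
    #
    # For an MLE fit, rv_kwargs can simply be omitted.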
def goodness_of_fit(self, method, **kwargs):
"""
Calculates various goodness-of-fit statistics for selected model.
Parameters
----------
method : str
Goodness of fit statistic method.
Supported methods:
'AIC' - Akaike information criterion
Lower value corresponds to a better fit.
see https://en.wikipedia.org/wiki/Akaike_information_criterion
'log-likelihood' - log-likelihood
Higher value corresponds to a better fit.
                'KS' - Kolmogorov-Smirnov test
Null hypothesis - both samples come from the same distribution.
If p<0.05 => reject Null hypothesis with p-level of confidence.
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html
'chi-square' - Chi-Square test
Null hypothesis - both samples come from the same distribution.
                    Calculates expected counts for the given quantile ranges and compares them to the observed counts.
If p<0.05 => reject Null hypothesis with p-level of confidence.
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html
kwargs
if fit is MCMC
burn_in : int
                    Number of initial (burn-in) samples to discard, i.e. samples drawn before the series converges.
kernel_steps : int, optional
Number of bins (kernel support points) to determine mode (default=1000).
for AIC
order : int, optional
Order of AIC (1 for regular, 2 for small samples) (default=2).
k : int
Number of parameters estimated by the model (fixed parameters don't count)
            for KS
mode : str, optional
See scipy docs (default='approx').
alternative : str, optional
See scipy docs (default='two-sided').
for chi-square
chi_quantiles : int, optional
Number of equal slices (quantiles) into which observed data is split
                    to calculate the statistic (default=4).
k : int
Number of parameters estimated by the model (fixed parameters don't count)
Returns
-------
if method = 'log-likelihood' : float, log-likelihood
if method = 'AIC' : float, AIC statistic
if method = 'KS' : tuple(statistic, p-value)
if method = 'chi-square' : tuple(statistic, p-value)
"""
# Make sure fit method was executed and fit data was generated
if not self.__status['fit']:
raise ValueError('No fit information found. Run self.fit() method first')
if self.fit_method == 'MLE':
fit_parameters = self.fit_parameters
elif self.fit_method == 'MCMC':
burn_in = kwargs.pop('burn_in')
kernel_steps = kwargs.pop('kernel_steps', 1000)
fit_parameters = self._kernel_fit_parameters(burn_in=burn_in, kernel_steps=kernel_steps)
else:
raise RuntimeError(f'Unexpected fit_method {self.fit_method}')
distribution_object = getattr(scipy.stats, self.distribution_name)
exceedances = self.extremes[self.column].values - self.threshold
# Flip exceedances around 0
if self.extremes_type == 'low':
exceedances *= -1
log_likelihood = np.sum(
distribution_object.logpdf(exceedances, *fit_parameters)
)
if method == 'log-likelihood':
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
return log_likelihood
elif method == 'AIC':
order = kwargs.pop('order', 2)
k = kwargs.pop('k')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
aic = 2 * k - 2 * log_likelihood
if order == 1:
return aic
elif order == 2:
return aic + (2 * k ** 2 + 2 * k) / (len(self.extremes) - k - 1)
else:
raise ValueError(f'order must be 1 or 2, {order} was passed')
elif method == 'KS':
mode = kwargs.pop('mode', 'approx')
alternative = kwargs.pop('alternative', 'two-sided')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
exceedances = self.extremes[self.column].values - self.threshold
if self.extremes_type == 'low':
exceedances *= -1
ks, p = scipy.stats.kstest(
rvs=exceedances, cdf=distribution_object.cdf, args=fit_parameters,
alternative=alternative, mode=mode
)
return ks, p
elif method == 'chi-square':
chi_quantiles = kwargs.pop('chi_quantiles', 4)
k = kwargs.pop('k')
assert len(kwargs) == 0, f'unrecognized arguments passed in: {", ".join(kwargs.keys())}'
chi_quantile_ranges = [1 / chi_quantiles * (i + 1) for i in np.arange(-1, chi_quantiles)]
observed_counts, expected_counts = [], []
for i in range(chi_quantiles):
bot = np.nanquantile(
self.extremes[self.column].values,
chi_quantile_ranges[i]
)
top = np.nanquantile(
self.extremes[self.column].values,
chi_quantile_ranges[i + 1]
)
if i + 1 == chi_quantiles:
observed_counts.append(
len(
self.extremes[
(self.extremes[self.column] >= bot)
& (self.extremes[self.column] <= top)
]
)
)
else:
observed_counts.append(
len(
self.extremes[
(self.extremes[self.column] >= bot)
& (self.extremes[self.column] < top)
]
)
)
expected_counts.append(
len(self.extremes) * (self.cdf(top) - self.cdf(bot))
)
if min(observed_counts) <= 5 or min(expected_counts) <= 5:
raise ValueError(f'Too few observations in observed counts {min(observed_counts)} '
f'or expected counts {min(expected_counts):.0f}, reduce chi_quantiles')
cs, p = scipy.stats.chisquare(f_obs=observed_counts, f_exp=expected_counts, ddof=k)
return cs, p
else:
raise ValueError(f'Method {method} not recognized')
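# Illustrative usage sketch (not part of the original module). Assuming a fitted EVA
# instance `eva`, the goodness-of-fit statistics documented above could be queried as:
#
#     aic = eva.goodness_of_fit(method='AIC', k=2)
#     ks_stat, ks_p = eva.goodness_of_fit(method='KS')
#     chi_stat, chi_p = eva.goodness_of_fit(method='chi-square', k=2, chi_quantiles=4)
#
# For MCMC fits, burn_in (and optionally kernel_steps) must be passed as well.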
if __name__ == "__main__":
# Load data and initialize EVA
import os
df = pd.read_csv(
os.path.join(os.getcwd(), r'test data\Battery_residuals.csv'),
index_col=0, parse_dates=True
)
self = EVA(dataframe=df, column='Residuals (ft)', block_size=365.25, gap_length=24)
# Set up test parameters
etype = 'high'
extremes_method = 'POT'
_method = 'MCMC'
mle_ci = 'Delta'
if extremes_method == 'POT':
_distribution = 'genpareto'
elif extremes_method == 'BM':
_distribution = 'genextreme'
else:
raise RuntimeError
# Run a series of methods to assist in finding optimal threshold
if extremes_method == 'POT':
if etype == 'high':
self.plot_mean_residual_life(
thresholds=np.arange(2, 8, .01), r=24*7, alpha=.95,
adjust_threshold=True, limit=10, extremes_type='high'
)
self.plot_parameter_stability(
thresholds=np.arange(3, 8, .05), r=24*7, alpha=.95,
adjust_threshold=True, limit=10, extremes_type='high'
)
elif etype == 'low':
self.plot_mean_residual_life(
thresholds=np.arange(-8, -2, .01), r=24*7, alpha=.95,
adjust_threshold=True, limit=10, extremes_type='low'
)
self.plot_parameter_stability(
thresholds=np.arange(-8, -2.5, .05), r=24*7, alpha=.95,
adjust_threshold=True, limit=20, extremes_type='low'
)
# Extract extreme values
if extremes_method == 'BM':
self.get_extremes(method='BM', plotting_position='Weibull', extremes_type=etype)
elif extremes_method == 'POT':
if etype == 'high':
self.get_extremes(method='POT', threshold=3, r=24*7, plotting_position='Weibull', extremes_type='high')
elif etype == 'low':
self.get_extremes(method='POT', threshold=-2.8, r=24*7, plotting_position='Weibull', extremes_type='low')
self.plot_extremes()
# Test independence of POT extremes
if extremes_method == 'POT':
self.test_extremes(method='autocorrelation')
self.test_extremes(method='lag plot', lag=1)
print(self.test_extremes(method='runs test', alpha=0.05))
# Fit distribution
if _method == 'MLE':
if _distribution == 'genpareto':
# Shape (f0) and location (floc) are both 0 => equivalent to exponential distribution (expon with floc=0)
self.fit(distribution_name=_distribution, fit_method='MLE', scipy_fit_options=dict(floc=0))
elif _distribution == 'genextreme':
self.fit(distribution_name=_distribution, fit_method='MLE')
elif _method == 'MCMC':
self.fit(
distribution_name=_distribution, fit_method='MCMC',
nsamples=1000, nwalkers=200, starting_bubble=.01
)
# Trace plot
if _distribution == 'genpareto':
fig_trace, axes_trace = self.plot_trace(burn_in=200, labels=[r'$\xi$', r'$\sigma$'])
elif _distribution == 'genextreme':
fig_trace, axes_trace = self.plot_trace(burn_in=200, labels=[r'$\xi$', r'$\mu$', r'$\sigma$'])
if _distribution == 'genpareto':
fig_corner = self.plot_corner(burn_in=200, bins=50, labels=[r'$\xi$', r'$\sigma$'], smooth=1)
elif _distribution == 'genextreme':
fig_corner = self.plot_corner(burn_in=200, bins=50, labels=[r'$\xi$', r'$\mu$', r'$\sigma$'], smooth=1)
# Test quality of fit
if _method == 'MLE':
print(self.goodness_of_fit(method='AIC', k=1))
self.plot_qq(k=2, plotting_position='Weibull', quantiles=True)
self.plot_qq(k=2, plotting_position='Weibull', quantiles=False)
else:
_burn_in = 200
print(self.goodness_of_fit(method='AIC', k=2, burn_in=_burn_in, kernel_steps=100))
self.plot_qq(
k=2, plotting_position='Weibull', quantiles=True,
rv_kwargs=dict(burn_in=_burn_in, estimate_method='parameter mode', kernel_steps=100)
)
self.plot_qq(
k=2, plotting_position='Weibull', quantiles=False,
rv_kwargs=dict(burn_in=_burn_in, estimate_method='parameter mode', kernel_steps=100)
)
# Generate results
if _method == 'MCMC':
_burn_in = 200
self.generate_results(
alpha=.95,
rv_kwargs=dict(burn_in=_burn_in, estimate_method='parameter mode', kernel_steps=100),
ci_kwargs=dict(burn_in=_burn_in)
)
elif _method == 'MLE':
if mle_ci == 'Monte Carlo':
self.generate_results(
alpha=.95,
ci_kwargs=dict(
method='Monte Carlo', k=100, source='data', sampling_method='constant', assume_normality=False
)
)
elif mle_ci == 'Delta':
self.generate_results(alpha=.95, ci_kwargs=dict(method='Delta', dx='1e-10', precision=100))
# Plot extremes return plot
if _method == 'MCMC':
_burn_in = 200
self.plot_summary(
bins=10, plotting_position='Gringorten',
rv_kwargs=dict(burn_in=200, estimate_method='parameter mode', kernel_steps=100)
)
elif _method == 'MLE':
self.plot_summary(bins=10, plotting_position='Gringorten')
| gpl-3.0 | -9,124,196,757,702,102,000 | 45.572071 | 120 | 0.527463 | false |
harikishen/addons-server | src/olympia/editors/tests/test_sql_model.py | 1 | 10512 | # -*- coding: utf-8 -*-
"""Tests for SQL Model.
Currently these tests are coupled tightly with MySQL
"""
from datetime import datetime
from django.db import connection, models
from django.db.models import Q
import pytest
from olympia.amo.tests import BaseTestCase
from olympia.editors.sql_model import RawSQLModel
def execute_all(statements):
with connection.cursor() as cursor:
for sql in statements:
if not sql.strip():
continue
cursor.execute(sql, [])
class Summary(RawSQLModel):
category = models.CharField(max_length=255)
total = models.IntegerField()
latest_product_date = models.DateTimeField()
def base_query(self):
return {
'select': {
'category': 'c.name',
'total': 'count(*)',
'latest_product_date': 'max(p.created)'
},
'from': [
'sql_model_test_product p',
'join sql_model_test_product_cat x on x.product_id=p.id',
'join sql_model_test_cat c on x.cat_id=c.id'],
'where': [],
'group_by': 'category'
}
class ProductDetail(RawSQLModel):
product = models.CharField(max_length=255)
category = models.CharField(max_length=255)
def base_query(self):
return {
'select': {
'product': 'p.name',
'category': 'c.name'
},
'from': [
'sql_model_test_product p',
'join sql_model_test_product_cat x on x.product_id=p.id',
'join sql_model_test_cat c on x.cat_id=c.id'],
'where': []
}
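# Illustrative note (an assumption, not part of the original tests): RawSQLModel is
# expected to assemble the base_query dict above into SQL roughly equivalent to
#
#     SELECT p.name AS product, c.name AS category
#     FROM sql_model_test_product p
#     JOIN sql_model_test_product_cat x ON x.product_id = p.id
#     JOIN sql_model_test_cat c ON x.cat_id = c.id
#
# with filter_raw()/having() arguments appended to the WHERE/HAVING clauses.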
class TestSQLModel(BaseTestCase):
@pytest.fixture(autouse=True)
def setup(self, request):
sql = """
create table if not exists sql_model_test_product (
id int(11) not null auto_increment primary key,
name varchar(255) not null,
created datetime not null
);
create table if not exists sql_model_test_cat (
id int(11) not null auto_increment primary key,
name varchar(255) not null
);
create table if not exists sql_model_test_product_cat (
id int(11) not null auto_increment primary key,
cat_id int(11) not null references sql_model_test_cat (id),
product_id int(11) not null references sql_model_test_product (id)
);
insert into sql_model_test_product (id, name, created)
values (1, 'defilbrilator', UTC_TIMESTAMP());
insert into sql_model_test_cat (id, name)
values (1, 'safety');
insert into sql_model_test_product_cat (product_id, cat_id)
values (1, 1);
insert into sql_model_test_product (id, name, created)
values (2, 'life jacket', UTC_TIMESTAMP());
insert into sql_model_test_product_cat (product_id, cat_id)
values (2, 1);
insert into sql_model_test_product (id, name, created)
values (3, 'snake skin jacket',UTC_TIMESTAMP());
insert into sql_model_test_cat (id, name)
values (2, 'apparel');
insert into sql_model_test_product_cat (product_id, cat_id)
values (3, 2);
""".split(';')
def teardown():
try:
sql = """
drop table if exists sql_model_test_product_cat;
drop table if exists sql_model_test_cat;
drop table if exists sql_model_test_product;
""".split(';')
execute_all(sql)
except:
pass # No failing here.
teardown()
execute_all(sql)
request.addfinalizer(teardown)
def test_all(self):
assert sorted([s.category for s in Summary.objects.all()]) == (
['apparel', 'safety'])
def test_count(self):
assert Summary.objects.all().count() == 2
def test_one(self):
c = Summary.objects.all().order_by('category')[0]
assert c.category == 'apparel'
def test_get_by_index(self):
qs = Summary.objects.all().order_by('category')
assert qs[0].category == 'apparel'
assert qs[1].category == 'safety'
def test_get(self):
c = Summary.objects.all().having('total =', 1).get()
assert c.category == 'apparel'
def test_get_no_object(self):
with self.assertRaises(Summary.DoesNotExist):
Summary.objects.all().having('total =', 999).get()
def test_get_many(self):
with self.assertRaises(Summary.MultipleObjectsReturned):
Summary.objects.all().get()
def test_slice1(self):
qs = Summary.objects.all()[0:1]
assert [c.category for c in qs] == ['apparel']
def test_slice2(self):
qs = Summary.objects.all()[1:2]
assert [c.category for c in qs] == ['safety']
def test_slice3(self):
qs = Summary.objects.all()[:2]
assert sorted([c.category for c in qs]) == ['apparel', 'safety']
def test_slice4(self):
qs = Summary.objects.all()[0:]
assert sorted([c.category for c in qs]) == ['apparel', 'safety']
def test_slice5(self):
assert ['defilbrilator'] == [
c.product for c in
ProductDetail.objects.all().order_by('product')[0:1]]
assert ['life jacket'] == [
c.product for c in
ProductDetail.objects.all().order_by('product')[1:2]]
assert ['snake skin jacket'] == [
c.product for c in
ProductDetail.objects.all().order_by('product')[2:3]]
def test_negative_slices_not_supported(self):
with self.assertRaises(IndexError):
Summary.objects.all()[:-1]
def test_order_by(self):
c = Summary.objects.all().order_by('category')[0]
assert c.category == 'apparel'
c = Summary.objects.all().order_by('-category')[0]
assert c.category == 'safety'
def test_order_by_alias(self):
c = ProductDetail.objects.all().order_by('product')[0]
assert c.product == 'defilbrilator'
c = ProductDetail.objects.all().order_by('-product')[0]
assert c.product == 'snake skin jacket'
def test_order_by_injection(self):
with self.assertRaises(ValueError):
Summary.objects.order_by('category; drop table foo;')[0]
def test_filter(self):
c = Summary.objects.all().filter(category='apparel')[0]
assert c.category == 'apparel'
def test_filter_raw_equals(self):
c = Summary.objects.all().filter_raw('category =', 'apparel')[0]
assert c.category == 'apparel'
def test_filter_raw_in(self):
qs = Summary.objects.all().filter_raw('category IN',
['apparel', 'safety'])
assert [c.category for c in qs] == ['apparel', 'safety']
def test_filter_raw_non_ascii(self):
uni = 'フォクすけといっしょ'.decode('utf8')
qs = (Summary.objects.all().filter_raw('category =', uni)
.filter_raw(Q('category =', uni) | Q('category !=', uni)))
assert [c.category for c in qs] == []
def test_combining_filters_with_or(self):
qs = (ProductDetail.objects.all()
.filter(Q(product='life jacket') | Q(product='defilbrilator')))
assert sorted([r.product for r in qs]) == [
'defilbrilator', 'life jacket']
def test_combining_raw_filters_with_or(self):
qs = (ProductDetail.objects.all()
.filter_raw(Q('product =', 'life jacket') |
Q('product =', 'defilbrilator')))
assert sorted([r.product for r in qs]) == [
'defilbrilator', 'life jacket']
def test_nested_raw_filters_with_or(self):
qs = (ProductDetail.objects.all()
.filter_raw(Q('category =', 'apparel',
'product =', 'defilbrilator') |
Q('product =', 'life jacket')))
assert sorted([r.product for r in qs]) == ['life jacket']
def test_crazy_nesting(self):
qs = (ProductDetail.objects.all()
.filter_raw(Q('category =', 'apparel',
'product =', 'defilbrilator',
Q('product =', 'life jacket') |
Q('product =', 'snake skin jacket'),
'category =', 'safety')))
# print qs.as_sql()
assert sorted([r.product for r in qs]) == ['life jacket']
def test_having_gte(self):
c = Summary.objects.all().having('total >=', 2)[0]
assert c.category == 'safety'
def test_invalid_raw_filter_spec(self):
with self.assertRaises(ValueError):
Summary.objects.all().filter_raw(
"""category = 'apparel'; drop table foo;
select * from foo where category = 'apparel'""",
'apparel')[0]
def test_filter_field_injection(self):
f = ("c.name = 'apparel'; drop table foo; "
"select * from sql_model_test_cat where c.name = 'apparel'")
with self.assertRaises(ValueError):
c = Summary.objects.all().filter(**{f: 'apparel'})[0]
assert c.category == 'apparel'
def test_filter_value_injection(self):
v = ("'apparel'; drop table foo; "
"select * from sql_model_test_cat where c.name")
query = Summary.objects.all().filter(**{'c.name': v})
try:
query[0]
except IndexError:
pass
# NOTE: this reaches into MySQLdb's cursor :(
executed = query._cursor.cursor._executed
assert "c.name = '\\'apparel\\'; drop table foo;" in executed, (
'Expected query to be escaped: %s' % executed)
def check_type(self, val, types):
assert isinstance(val, types), (
'Unexpected type: %s for %s' % (type(val), val))
def test_types(self):
row = Summary.objects.all().order_by('category')[0]
self.check_type(row.category, unicode)
self.check_type(row.total, (int, long))
self.check_type(row.latest_product_date, datetime)
def test_values(self):
row = Summary.objects.all().order_by('category')[0]
assert row.category == 'apparel'
assert row.total == 1
assert row.latest_product_date.timetuple()[0:3] == (
datetime.utcnow().timetuple()[0:3])
| bsd-3-clause | -8,566,747,079,214,111,000 | 35.430556 | 78 | 0.551754 | false |
takmid/inasafe | safe_qgis/impact_functions_doc_base.py | 1 | 9760 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'impact_functions_doc_base.ui'
#
# Created: Fri Sep 14 14:43:14 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ImpactFunctionsDocBase(object):
def setupUi(self, ImpactFunctionsDocBase):
ImpactFunctionsDocBase.setObjectName(_fromUtf8("ImpactFunctionsDocBase"))
ImpactFunctionsDocBase.resize(821, 733)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/inasafe/icon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ImpactFunctionsDocBase.setWindowIcon(icon)
self.gridLayout = QtGui.QGridLayout(ImpactFunctionsDocBase)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.myButtonBox = QtGui.QDialogButtonBox(ImpactFunctionsDocBase)
self.myButtonBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.myButtonBox.setAutoFillBackground(False)
self.myButtonBox.setOrientation(QtCore.Qt.Horizontal)
self.myButtonBox.setStandardButtons(QtGui.QDialogButtonBox.Close|QtGui.QDialogButtonBox.Help|QtGui.QDialogButtonBox.Reset)
self.myButtonBox.setCenterButtons(False)
self.myButtonBox.setObjectName(_fromUtf8("myButtonBox"))
self.gridLayout.addWidget(self.myButtonBox, 1, 1, 1, 1)
self.gridLayoutMain = QtGui.QGridLayout()
self.gridLayoutMain.setHorizontalSpacing(0)
self.gridLayoutMain.setObjectName(_fromUtf8("gridLayoutMain"))
self.label_title = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_title.sizePolicy().hasHeightForWidth())
self.label_title.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_title.setFont(font)
self.label_title.setObjectName(_fromUtf8("label_title"))
self.gridLayoutMain.addWidget(self.label_title, 1, 0, 1, 1)
self.label_id = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_id.sizePolicy().hasHeightForWidth())
self.label_id.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_id.setFont(font)
self.label_id.setObjectName(_fromUtf8("label_id"))
self.gridLayoutMain.addWidget(self.label_id, 1, 1, 1, 1)
self.label_subcategory = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_subcategory.sizePolicy().hasHeightForWidth())
self.label_subcategory.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_subcategory.setFont(font)
self.label_subcategory.setObjectName(_fromUtf8("label_subcategory"))
self.gridLayoutMain.addWidget(self.label_subcategory, 1, 3, 1, 1)
self.label_category = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_category.sizePolicy().hasHeightForWidth())
self.label_category.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_category.setFont(font)
self.label_category.setObjectName(_fromUtf8("label_category"))
self.gridLayoutMain.addWidget(self.label_category, 1, 2, 1, 1)
self.label_layertype = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_layertype.sizePolicy().hasHeightForWidth())
self.label_layertype.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_layertype.setFont(font)
self.label_layertype.setObjectName(_fromUtf8("label_layertype"))
self.gridLayoutMain.addWidget(self.label_layertype, 1, 4, 1, 1)
self.comboBox_id = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_id.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
self.comboBox_id.setObjectName(_fromUtf8("comboBox_id"))
self.gridLayoutMain.addWidget(self.comboBox_id, 3, 1, 1, 1)
self.comboBox_title = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_title.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
self.comboBox_title.setMinimumContentsLength(0)
self.comboBox_title.setObjectName(_fromUtf8("comboBox_title"))
self.gridLayoutMain.addWidget(self.comboBox_title, 3, 0, 1, 1)
self.comboBox_category = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_category.setObjectName(_fromUtf8("comboBox_category"))
self.gridLayoutMain.addWidget(self.comboBox_category, 3, 2, 1, 1)
self.label_unit = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_unit.sizePolicy().hasHeightForWidth())
self.label_unit.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_unit.setFont(font)
self.label_unit.setObjectName(_fromUtf8("label_unit"))
self.gridLayoutMain.addWidget(self.label_unit, 1, 6, 1, 1)
self.label_datatype = QtGui.QLabel(ImpactFunctionsDocBase)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_datatype.sizePolicy().hasHeightForWidth())
self.label_datatype.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_datatype.setFont(font)
self.label_datatype.setObjectName(_fromUtf8("label_datatype"))
self.gridLayoutMain.addWidget(self.label_datatype, 1, 5, 1, 1)
self.comboBox_subcategory = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_subcategory.setObjectName(_fromUtf8("comboBox_subcategory"))
self.gridLayoutMain.addWidget(self.comboBox_subcategory, 3, 3, 1, 1)
self.comboBox_layertype = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_layertype.setObjectName(_fromUtf8("comboBox_layertype"))
self.gridLayoutMain.addWidget(self.comboBox_layertype, 3, 4, 1, 1)
self.comboBox_datatype = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_datatype.setObjectName(_fromUtf8("comboBox_datatype"))
self.gridLayoutMain.addWidget(self.comboBox_datatype, 3, 5, 1, 1)
self.comboBox_unit = QtGui.QComboBox(ImpactFunctionsDocBase)
self.comboBox_unit.setObjectName(_fromUtf8("comboBox_unit"))
self.gridLayoutMain.addWidget(self.comboBox_unit, 3, 6, 1, 1)
self.webView = QtWebKit.QWebView(ImpactFunctionsDocBase)
self.webView.setUrl(QtCore.QUrl(_fromUtf8("about:blank")))
self.webView.setObjectName(_fromUtf8("webView"))
self.gridLayoutMain.addWidget(self.webView, 4, 0, 1, 7)
self.gridLayout.addLayout(self.gridLayoutMain, 0, 1, 1, 1)
self.retranslateUi(ImpactFunctionsDocBase)
QtCore.QObject.connect(self.myButtonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), ImpactFunctionsDocBase.reject)
QtCore.QMetaObject.connectSlotsByName(ImpactFunctionsDocBase)
def retranslateUi(self, ImpactFunctionsDocBase):
ImpactFunctionsDocBase.setWindowTitle(QtGui.QApplication.translate("ImpactFunctionsDocBase", "InaSAFE Impact Functions", None, QtGui.QApplication.UnicodeUTF8))
self.label_title.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Title", None, QtGui.QApplication.UnicodeUTF8))
self.label_id.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "ID", None, QtGui.QApplication.UnicodeUTF8))
self.label_subcategory.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Subcategory", None, QtGui.QApplication.UnicodeUTF8))
self.label_category.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Category", None, QtGui.QApplication.UnicodeUTF8))
self.label_layertype.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Layer Type", None, QtGui.QApplication.UnicodeUTF8))
self.label_unit.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Unit", None, QtGui.QApplication.UnicodeUTF8))
self.label_datatype.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Data Type", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import QtWebKit
import resources_rc
| gpl-3.0 | 9,172,954,171,871,963,000 | 57.795181 | 167 | 0.727254 | false |
jeffmkw/DAT210x-Lab | Module6/test.py | 1 | 2927 | import pandas as pd
# https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.names
#
# TODO: Load up the mushroom dataset into dataframe 'X'
# Verify you did it properly.
# Indices shouldn't be doubled.
# Header information is on the dataset's website at the UCI ML Repo
# Check NA Encoding # na_values = '!"
#
# .. your code here ..
# INFO: An easy way to show which rows have nans in them
# print X[pd.isnull(X).any(axis=1)]
X = pd.read_csv('Datasets/agaricus-lepiota.data',
names=['classes', 'cap-shape', 'cap-surface', 'cap-color', 'bruises?', 'odor', 'gill-attachment',
'gill-spacing', 'gill-size', 'gill-color', 'stalk-shape', 'stalk-root',
'stalk-surface-above-ring', 'stalk-surface-below-ring', 'stalk-color-above-ring',
'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number', 'ring-type',
'spore-print-color', 'population', 'habitat'],
na_values='?')
#
# TODO: Go ahead and drop any row with a nan
#
# .. your code here ..
# print(X.shape) # (8123, 22)
# print(X.dtypes)
# print(X[pd.isnull(X).any(axis=1)])# there is a hell lot of missing value
print(X.columns)
X.dropna(axis=0, how='any', inplace=True)
# print(X[pd.isnull(X).any(axis=1)]) # awesome. no missing value any more
#
# TODO: Copy the labels out of the dataset into variable 'y' then remove
# them from X. Encode the labels, using the .map() trick we showed
# you in Module 5 -- here poisonous 'p':1 and edible 'e':0
#
# .. your code here ..
y = X.classes
X.drop('classes', axis=1, inplace=True)
# print(set(y)) # {'p', 'e'}
y = y.map({'p': 1, 'e': 0})
# print(y)
#
# TODO: Encode the entire dataset using dummies
#
# .. your code here ..
X = pd.get_dummies(X)
# print(X)
#
# TODO: Split your data into test / train sets
# Your test size can be 30% with random_state 7
# Use variable names: X_train, X_test, y_train, y_test
#
# .. your code here ..
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)
#
# TODO: Create an DT classifier. No need to set any parameters
#
# .. your code
from sklearn import tree
model = tree.DecisionTreeClassifier()
#
# TODO: train the classifier on the training data / labels:
# TODO: score the classifier on the testing data / labels:
#
# .. your code here ..
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print ("High-Dimensionality Score: ", round((score*100), 3))
#
# TODO: Use the code on the courses SciKit-Learn page to output a .DOT file
# Then render the .DOT to .PNGs. Ensure you have graphviz installed.
# If not, `brew install graphviz`.
#
# .. your code here ..
tree.export_graphviz(model.tree_, out_file = 'tree.dot', feature_names = X.columns)
from subprocess import call
call(['dot', '-T', 'png', 'tree.dot', '-o', 'tree.png']) | mit | -1,792,554,009,541,030,100 | 29.821053 | 113 | 0.65152 | false |
robertnishihara/ray | python/ray/tune/examples/ax_example.py | 1 | 2352 | """This test checks that AxSearch is functional.
It also checks that it is usable with a separate scheduler.
"""
import numpy as np
import time
import ray
from ray import tune
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.ax import AxSearch
def hartmann6(x):
alpha = np.array([1.0, 1.2, 3.0, 3.2])
A = np.array([
[10, 3, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[3, 3.5, 1.7, 10, 17, 8],
[17, 8, 0.05, 10, 0.1, 14],
])
P = 10**(-4) * np.array([
[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381],
])
y = 0.0
for j, alpha_j in enumerate(alpha):
t = 0
for k in range(6):
t += A[j, k] * ((x[k] - P[j, k])**2)
y -= alpha_j * np.exp(-t)
return y
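# Illustrative check (not part of the original example): the commonly cited global
# minimum of the 6-dimensional Hartmann function is about -3.32237, reached near
#
#     hartmann6([0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573])
#
# which can serve as a sanity check on how close the search gets.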
def easy_objective(config):
for i in range(config["iterations"]):
x = np.array([config.get("x{}".format(i + 1)) for i in range(6)])
tune.report(
timesteps_total=i,
hartmann6=hartmann6(x),
l2norm=np.sqrt((x**2).sum()))
time.sleep(0.02)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
ray.init()
tune_kwargs = {
"num_samples": 10 if args.smoke_test else 50,
"config": {
"iterations": 100,
"x1": tune.uniform(0.0, 1.0),
"x2": tune.uniform(0.0, 1.0),
"x3": tune.uniform(0.0, 1.0),
"x4": tune.uniform(0.0, 1.0),
"x5": tune.uniform(0.0, 1.0),
"x6": tune.uniform(0.0, 1.0),
},
"stop": {
"timesteps_total": 100
}
}
algo = AxSearch(
max_concurrent=4,
metric="hartmann6",
mode="min",
parameter_constraints=["x1 + x2 <= 2.0"], # Optional.
outcome_constraints=["l2norm <= 1.25"], # Optional.
)
scheduler = AsyncHyperBandScheduler(metric="hartmann6", mode="min")
tune.run(
easy_objective,
name="ax",
search_alg=algo,
scheduler=scheduler,
**tune_kwargs)
| apache-2.0 | -7,999,556,598,531,771,000 | 26.670588 | 79 | 0.517857 | false |
wesm/ibis | ibis/expr/datatypes.py | 1 | 19843 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple, OrderedDict
import six
import ibis.common as com
import ibis.util as util
class Schema(object):
"""
Holds table schema information
"""
def __init__(self, names, types):
if not isinstance(names, list):
names = list(names)
self.names = names
self.types = [validate_type(x) for x in types]
self._name_locs = dict((v, i) for i, v in enumerate(self.names))
if len(self._name_locs) < len(self.names):
raise com.IntegrityError('Duplicate column names')
def __repr__(self):
space = 2 + max(map(len, self.names))
return "ibis.Schema {{{0}\n}}".format(
util.indent(
''.join(
'\n{0}{1}'.format(name.ljust(space), str(tipo))
for name, tipo in zip(self.names, self.types)
),
2
)
)
def __len__(self):
return len(self.names)
def __iter__(self):
return iter(self.names)
def __contains__(self, name):
return name in self._name_locs
def __getitem__(self, name):
return self.types[self._name_locs[name]]
def delete(self, names_to_delete):
for name in names_to_delete:
if name not in self:
raise KeyError(name)
new_names, new_types = [], []
for name, type_ in zip(self.names, self.types):
if name in names_to_delete:
continue
new_names.append(name)
new_types.append(type_)
return Schema(new_names, new_types)
@classmethod
def from_tuples(cls, values):
if not isinstance(values, (list, tuple)):
values = list(values)
if len(values):
names, types = zip(*values)
else:
names, types = [], []
return Schema(names, types)
@classmethod
def from_dict(cls, values):
names = list(values.keys())
types = values.values()
return Schema(names, types)
def equals(self, other, cache=None):
return self.names == other.names and self.types == other.types
def __eq__(self, other):
return self.equals(other)
def get_type(self, name):
return self.types[self._name_locs[name]]
def append(self, schema):
names = self.names + schema.names
types = self.types + schema.types
return Schema(names, types)
def items(self):
return zip(self.names, self.types)
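# Illustrative usage sketch (not part of the original module): a Schema can be built
# from name/type pairs and queried by column name, e.g.
#
#     sch = Schema.from_tuples([('a', 'int64'), ('b', 'string'), ('c', 'array<double>')])
#     sch.get_type('c')   # -> Array(double)
#     'b' in sch          # -> True
#
# String types are validated through validate_type(), defined later in this module.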
class HasSchema(object):
"""
Base class representing a structured dataset with a well-defined
schema.
Base implementation is for tables that do not reference a particular
concrete dataset or database table.
"""
def __init__(self, schema, name=None):
assert isinstance(schema, Schema)
self._schema = schema
self._name = name
def __repr__(self):
return self._repr()
def _repr(self):
return "%s(%s)" % (type(self).__name__, repr(self.schema))
@property
def schema(self):
return self._schema
def get_schema(self):
return self._schema
def has_schema(self):
return True
@property
def name(self):
return self._name
def equals(self, other, cache=None):
if type(self) != type(other):
return False
return self.schema.equals(other.schema, cache=cache)
def root_tables(self):
return [self]
class DataType(object):
def __init__(self, nullable=True):
self.nullable = nullable
def __call__(self, nullable=True):
return self._factory(nullable=nullable)
def _factory(self, nullable=True):
return type(self)(nullable=nullable)
def __eq__(self, other):
return self.equals(other)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(type(self))
def __repr__(self):
name = self.name.lower()
if not self.nullable:
name = '{0}[non-nullable]'.format(name)
return name
@property
def name(self):
return type(self).__name__
def equals(self, other, cache=None):
if isinstance(other, six.string_types):
other = validate_type(other)
return (isinstance(other, type(self)) and
self.nullable == other.nullable)
def can_implicit_cast(self, other):
return self.equals(other)
def scalar_type(self):
import ibis.expr.types as ir
return getattr(ir, '{0}Scalar'.format(type(self).__name__))
def array_type(self):
import ibis.expr.types as ir
return getattr(ir, '{0}Column'.format(type(self).__name__))
class Any(DataType):
pass
class Primitive(DataType):
pass
class Null(DataType):
pass
class Variadic(DataType):
pass
class Boolean(Primitive):
pass
Bounds = namedtuple('Bounds', ('upper', 'lower'))
class Integer(Primitive):
@property
def bounds(self):
exp = self._nbytes * 8 - 1
lower = -1 << exp
return Bounds(lower=lower, upper=~lower)
def can_implicit_cast(self, other):
return (
isinstance(other, Integer) and
(type(self) is Integer or other._nbytes <= self._nbytes)
)
class String(Variadic):
pass
class Date(Primitive):
pass
class Timestamp(Primitive):
pass
class SignedInteger(Integer):
pass
class Floating(Primitive):
def can_implicit_cast(self, other):
if isinstance(other, Integer):
return True
elif isinstance(other, Floating):
# return other._nbytes <= self._nbytes
return True
else:
return False
class Int8(Integer):
_nbytes = 1
class Int16(Integer):
_nbytes = 2
class Int32(Integer):
_nbytes = 4
class Int64(Integer):
_nbytes = 8
class Float(Floating):
_nbytes = 4
class Double(Floating):
_nbytes = 8
def parametric(cls):
type_name = cls.__name__
array_type_name = '{0}Column'.format(type_name)
scalar_type_name = '{0}Scalar'.format(type_name)
def array_type(self):
def constructor(op, name=None):
import ibis.expr.types as ir
return getattr(ir, array_type_name)(op, self, name=name)
return constructor
def scalar_type(self):
def constructor(op, name=None):
import ibis.expr.types as ir
return getattr(ir, scalar_type_name)(op, self, name=name)
return constructor
cls.array_type = array_type
cls.scalar_type = scalar_type
return cls
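# Illustrative note (not part of the original module): @parametric replaces the plain
# scalar_type()/array_type() lookups with closures that carry the parametrized
# instance. Assuming ibis.expr.types defines DecimalScalar/DecimalColumn:
#
#     t = Decimal(12, 2)
#     make_scalar = t.scalar_type()   # constructor producing DecimalScalar bound to t
#     make_column = t.array_type()    # constructor producing DecimalColumn bound to t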
@parametric
class Decimal(DataType):
# Decimal types are parametric, we store the parameters in this object
def __init__(self, precision, scale, nullable=True):
super(Decimal, self).__init__(nullable=nullable)
self.precision = precision
self.scale = scale
def __repr__(self):
return '{0}(precision={1:d}, scale={2:d})'.format(
self.name,
self.precision,
self.scale,
)
def __str__(self):
return '{0}({1:d}, {2:d})'.format(
self.name.lower(),
self.precision,
self.scale,
)
def __hash__(self):
return hash((self.precision, self.scale))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
return (
isinstance(other, Decimal) and
self.precision == other.precision and
self.scale == other.scale
)
@classmethod
def can_implicit_cast(cls, other):
return isinstance(other, (Floating, Decimal))
@parametric
class Category(DataType):
def __init__(self, cardinality=None, nullable=True):
super(Category, self).__init__(nullable=nullable)
self.cardinality = cardinality
def __repr__(self):
if self.cardinality is not None:
cardinality = self.cardinality
else:
cardinality = 'unknown'
return 'category(K={0})'.format(cardinality)
def __hash__(self):
return hash(self.cardinality)
def __eq__(self, other):
if not isinstance(other, Category):
return False
return self.cardinality == other.cardinality
def to_integer_type(self):
cardinality = self.cardinality
if cardinality is None:
return int64
elif cardinality < int8.bounds.upper:
return int8
elif cardinality < int16.bounds.upper:
return int16
elif cardinality < int32.bounds.upper:
return int32
else:
return int64
@parametric
class Struct(DataType):
def __init__(self, names, types, nullable=True):
super(Struct, self).__init__(nullable=nullable)
self.names = names
self.types = types
def __repr__(self):
return '{0}({1})'.format(
self.name,
list(zip(self.names, self.types))
)
def __str__(self):
return '{0}<{1}>'.format(
self.name.lower(),
', '.join(
'{0}: {1}'.format(n, t) for n, t in zip(self.names, self.types)
)
)
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.names == other.names and
self.types == other.types)
@classmethod
def from_tuples(self, pairs):
return Struct(*map(list, zip(*pairs)))
@parametric
class Array(Variadic):
def __init__(self, value_type, nullable=True):
super(Array, self).__init__(nullable=nullable)
self.value_type = value_type
def __repr__(self):
return '{0}({1})'.format(self.name, repr(self.value_type))
def __str__(self):
return '{0}<{1}>'.format(self.name.lower(), self.value_type)
def __eq__(self, other):
return (
isinstance(other, type(self)) and
self.value_type == other.value_type
)
@parametric
class Enum(DataType):
def __init__(self, rep_type, value_type, nullable=True):
super(Enum, self).__init__(nullable=nullable)
self.rep_type = rep_type
self.value_type = value_type
@parametric
class Map(DataType):
def __init__(self, key_type, value_type, nullable=True):
super(Map, self).__init__(nullable=nullable)
self.key_type = key_type
self.value_type = value_type
def __repr__(self):
return '{0}({1}, {2})'.format(
self.name,
repr(self.key_type),
repr(self.value_type),
)
def __str__(self):
return '{0}<{1}, {2}>'.format(
self.name.lower(),
self.key_type,
self.value_type,
)
def __eq__(self, other):
return (
isinstance(other, type(self)) and
self.key_type == other.key_type and
self.value_type == other.value_type
)
# ---------------------------------------------------------------------
any = Any()
null = Null()
boolean = Boolean()
int_ = Integer()
int8 = Int8()
int16 = Int16()
int32 = Int32()
int64 = Int64()
float = Float()
double = Double()
string = String()
date = Date()
timestamp = Timestamp()
_primitive_types = {
'any': any,
'null': null,
'boolean': boolean,
'int8': int8,
'int16': int16,
'int32': int32,
'int64': int64,
'float': float,
'double': double,
'string': string,
'date': date,
'timestamp': timestamp
}
class Tokens(object):
"""Class to hold tokens for lexing
"""
__slots__ = ()
ANY = 0
NULL = 1
PRIMITIVE = 2
DECIMAL = 3
VARCHAR = 4
CHAR = 5
ARRAY = 6
MAP = 7
STRUCT = 8
INTEGER = 9
FIELD = 10
COMMA = 11
COLON = 12
LPAREN = 13
RPAREN = 14
LBRACKET = 15
RBRACKET = 16
@staticmethod
def name(value):
return _token_names[value]
_token_names = dict(
(getattr(Tokens, n), n)
for n in dir(Tokens) if n.isalpha() and n.isupper()
)
Token = namedtuple('Token', ('type', 'value'))
_TYPE_RULES = OrderedDict(
[
# any, null
('(?P<ANY>any)', lambda token: Token(Tokens.ANY, any)),
('(?P<NULL>null)', lambda token: Token(Tokens.NULL, null)),
] + [
# primitive types
(
'(?P<{}>{})'.format(token.upper(), token),
lambda token, value=value: Token(Tokens.PRIMITIVE, value)
) for token, value in _primitive_types.items()
if token != 'any' and token != 'null'
] + [
# decimal + complex types
(
'(?P<{}>{})'.format(token.upper(), token),
lambda token, toktype=toktype: Token(toktype, token)
) for token, toktype in zip(
('decimal', 'varchar', 'char', 'array', 'map', 'struct'),
(
Tokens.DECIMAL,
Tokens.VARCHAR,
Tokens.CHAR,
Tokens.ARRAY,
Tokens.MAP,
Tokens.STRUCT
),
)
] + [
# numbers, for decimal spec
(r'(?P<INTEGER>\d+)', lambda token: Token(Tokens.INTEGER, int(token))),
# struct fields
(
r'(?P<FIELD>[a-zA-Z_][a-zA-Z_0-9]*)',
lambda token: Token(Tokens.FIELD, token)
),
('(?P<COMMA>,)', lambda token: Token(Tokens.COMMA, token)),
('(?P<COLON>:)', lambda token: Token(Tokens.COLON, token)),
(r'(?P<LPAREN>\()', lambda token: Token(Tokens.LPAREN, token)),
(r'(?P<RPAREN>\))', lambda token: Token(Tokens.RPAREN, token)),
('(?P<LBRACKET><)', lambda token: Token(Tokens.LBRACKET, token)),
('(?P<RBRACKET>>)', lambda token: Token(Tokens.RBRACKET, token)),
(r'(?P<WHITESPACE>\s+)', None),
]
)
_TYPE_KEYS = tuple(_TYPE_RULES.keys())
_TYPE_PATTERN = re.compile('|'.join(_TYPE_KEYS), flags=re.IGNORECASE)
def _generate_tokens(pat, text):
"""Generate a sequence of tokens from `text` that match `pat`
Parameters
----------
pat : compiled regex
The pattern to use for tokenization
text : str
The text to tokenize
"""
rules = _TYPE_RULES
keys = _TYPE_KEYS
groupindex = pat.groupindex
for m in iter(pat.scanner(text).match, None):
func = rules[keys[groupindex[m.lastgroup] - 1]]
if func is not None:
assert callable(func), 'func must be callable'
yield func(m.group(m.lastgroup))
class TypeParser(object):
"""A type parser for complex types.
Parameters
----------
text : str
The text to parse
Notes
-----
Adapted from David Beazley's and Brian Jones's Python Cookbook
"""
def __init__(self, text):
self.text = text
self.tokens = _generate_tokens(_TYPE_PATTERN, text)
self.tok = None
self.nexttok = None
def _advance(self):
self.tok, self.nexttok = self.nexttok, next(self.tokens, None)
def _accept(self, toktype):
if self.nexttok is not None and self.nexttok.type == toktype:
self._advance()
return True
return False
def _expect(self, toktype):
if not self._accept(toktype):
raise SyntaxError('Expected {0} after {1!r} in {2!r}'.format(
Tokens.name(toktype),
self.tok.value,
self.text,
))
def parse(self):
self._advance()
# any and null types cannot be nested
if self._accept(Tokens.ANY) or self._accept(Tokens.NULL):
return self.tok.value
t = self.type()
if self.nexttok is None:
return t
else:
# additional junk was passed at the end, throw an error
additional_tokens = []
while self.nexttok is not None:
additional_tokens.append(self.nexttok.value)
self._advance()
raise SyntaxError(
'Found additional tokens {0}'.format(additional_tokens)
)
def type(self):
"""
type : primitive
| decimal
| array
| map
| struct
primitive : "any"
| "null"
| "boolean"
| "int8"
| "int16"
| "int32"
| "int64"
| "float"
| "double"
| "string"
| "timestamp"
decimal : "decimal"
| "decimal" "(" integer "," integer ")"
integer : [0-9]+
array : "array" "<" type ">"
map : "map" "<" type "," type ">"
struct : "struct" "<" field ":" type ("," field ":" type)* ">"
field : [a-zA-Z_][a-zA-Z_0-9]*
"""
if self._accept(Tokens.PRIMITIVE):
return self.tok.value
elif self._accept(Tokens.DECIMAL):
if self._accept(Tokens.LPAREN):
self._expect(Tokens.INTEGER)
precision = self.tok.value
self._expect(Tokens.COMMA)
self._expect(Tokens.INTEGER)
scale = self.tok.value
self._expect(Tokens.RPAREN)
else:
precision = 9
scale = 0
return Decimal(precision, scale)
elif self._accept(Tokens.VARCHAR) or self._accept(Tokens.CHAR):
# VARCHAR, VARCHAR(n), CHAR, and CHAR(n) all parse as STRING
if self._accept(Tokens.LPAREN):
self._expect(Tokens.INTEGER)
self._expect(Tokens.RPAREN)
return string
return string
elif self._accept(Tokens.ARRAY):
self._expect(Tokens.LBRACKET)
value_type = self.type()
self._expect(Tokens.RBRACKET)
return Array(value_type)
elif self._accept(Tokens.MAP):
self._expect(Tokens.LBRACKET)
self._expect(Tokens.PRIMITIVE)
key_type = self.tok.value
self._expect(Tokens.COMMA)
value_type = self.type()
self._expect(Tokens.RBRACKET)
return Map(key_type, value_type)
elif self._accept(Tokens.STRUCT):
self._expect(Tokens.LBRACKET)
self._expect(Tokens.FIELD)
names = [self.tok.value]
self._expect(Tokens.COLON)
types = [self.type()]
while self._accept(Tokens.COMMA):
self._expect(Tokens.FIELD)
names.append(self.tok.value)
self._expect(Tokens.COLON)
types.append(self.type())
self._expect(Tokens.RBRACKET)
return Struct(names, types)
else:
raise SyntaxError('Type cannot be parsed: {0}'.format(self.text))
def validate_type(t):
if isinstance(t, DataType):
return t
return TypeParser(t).parse()
def array_type(t):
# compatibility
return validate_type(t).array_type()
def scalar_type(t):
# compatibility
return validate_type(t).scalar_type()
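# Illustrative usage sketch (not part of the original module): validate_type() drives
# TypeParser and accepts primitive as well as nested type strings, e.g.
#
#     validate_type('int32')                       # -> int32
#     validate_type('decimal(12, 2)')              # -> Decimal(precision=12, scale=2)
#     validate_type('map<string, array<double>>')  # -> Map(string, Array(double))
#     validate_type('struct<a: int64, b: string>') # -> Struct(['a', 'b'], [int64, string])
#
# Strings that cannot be parsed raise SyntaxError from TypeParser.parse().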
| apache-2.0 | -6,465,158,278,819,873,000 | 23.407134 | 79 | 0.545835 | false |
Mariaanisimova/pythonintask | IVTp/2014/Shcherbakov_R_A/task_09_22.py | 1 | 1636 | # Task 9. Variant 22.
# Create a game in which the computer picks a word and the player has to guess it.
# The computer tells the player how many letters the word has and gives five attempts
# to ask whether a particular letter occurs in the word, with the program allowed to
# answer only "Yes" and "No". After that the player must try to guess the whole word.
# Shcherbakov R.A.
# 22.05.2016
import random
words="Сессия","Питон","Автомат","РГСУ","Расписание"
rand=random.randint(0,4)
massiv=list(words[rand].lower())
print("Ты попал на поле чудес, только тут мы не говорим где находится буква которую \
угадаешь.\nТема: Учеба\nБукв: "+str(len(massiv)))
popitka=5
inp=""
text="Угадали"
while popitka!=0:
if input("У тебя "+str(popitka)+" попыток\nВведите букву: ") in massiv:
print("Да")
else:
print("Нет")
popitka-=1
while inp.lower()!=words[rand].lower():
inp=input("Введите слово: ")
if(inp.lower()=="я слабак"):
inp=words[rand]
text="Слабак"
elif(inp.lower()==words[rand].lower()):
text="Угадали"
else:
print("Попытайтесь еще раз\nНаберите 'Я слабак' для выхода")
input("\nВы "+text)
| apache-2.0 | 3,770,639,365,246,442,000 | 31.571429 | 85 | 0.67807 | false |
dsonbill/DMPHive | xmlrsa.py | 1 | 3988 | import rsa
import base64
import math
import xml.etree.ElementTree as ET
# Utility functions
def bytes_to_int(byte_data):
return int.from_bytes(byte_data, 'big')
def bytes_from_int(integer):
byte_length = math.ceil(integer.bit_length() / 8)
return integer.to_bytes(byte_length, 'big')
class RSA():
    def __init__(self, key_size=None):
        # Default to "not initialized" so that sign(), verify() and save_keys_xml()
        # called before keys are generated or loaded fail gracefully instead of
        # raising AttributeError on a missing attribute.
        self.initialized = False
        if key_size is not None:
            self.public_key, self.private_key = rsa.newkeys(key_size)
            self.public_key_xml, self.private_key_xml = self.get_keys_xml_string(self.private_key)
            self.initialized = True
def sign(self, message, hash):
if self.initialized:
return rsa.sign(message, self.private_key, hash)
def verify(self, message, signature):
if self.initialized:
return rsa.verify(message, signature, self.public_key)
def load_keys_xml(self, filename_private_key):
# Build public and private key object
rsa_xml = ET.parse(filename_private_key).getroot()
modulus_xml = rsa_xml.find('Modulus')
exponent_xml = rsa_xml.find('Exponent')
d_xml = rsa_xml.find('D')
p_xml = rsa_xml.find('P')
q_xml = rsa_xml.find('Q')
modulus_int = bytes_to_int(base64.standard_b64decode(modulus_xml.text))
modulus_bytes = base64.standard_b64decode(modulus_xml.text)
modulus_bytes_tested = bytes_from_int(bytes_to_int(modulus_bytes))
if modulus_bytes != modulus_bytes_tested:
raise Exception('A modulus mismatch was encountered with xmlrsa. Please check your rsa key modulus!')
exponent_int = bytes_to_int(base64.standard_b64decode(exponent_xml.text))
d_int = bytes_to_int(base64.standard_b64decode(d_xml.text))
p_int = bytes_to_int(base64.standard_b64decode(p_xml.text))
q_int = bytes_to_int(base64.standard_b64decode(q_xml.text))
# Set key objects
self.public_key = rsa.PublicKey(modulus_int, exponent_int)
self.private_key = rsa.PrivateKey(modulus_int, exponent_int, d_int, p_int, q_int)
# Set key xml strings
self.public_key_xml, self.private_key_xml = self.get_keys_xml_string(self.private_key)
# Set initialized flag
self.initialized = True
def save_keys_xml(self, filename_private_key):
if self.initialized:
with open(filename_private_key, 'w') as file:
file.write(self.private_key_xml)
@staticmethod
def get_keys_xml_string(private_key):
rsa_key_value_xml = ET.Element('RSAKeyValue')
modulus_xml = ET.SubElement(rsa_key_value_xml, 'Modulus')
exponent_xml = ET.SubElement(rsa_key_value_xml, 'Exponent')
modulus_xml.text = base64.standard_b64encode(bytes_from_int(private_key.n)).decode('utf-8')
exponent_xml.text = base64.standard_b64encode(bytes_from_int(private_key.e)).decode('utf-8')
pubkey = ET.tostring(rsa_key_value_xml).decode('utf-8')
d_xml = ET.SubElement(rsa_key_value_xml, 'D')
p_xml = ET.SubElement(rsa_key_value_xml, 'P')
q_xml = ET.SubElement(rsa_key_value_xml, 'Q')
dp_xml = ET.SubElement(rsa_key_value_xml, 'DP')
dq_xml = ET.SubElement(rsa_key_value_xml, 'DQ')
inverseq_xml = ET.SubElement(rsa_key_value_xml, 'InverseQ')
d_xml.text = base64.standard_b64encode(bytes_from_int(private_key.d)).decode('utf-8')
p_xml.text = base64.standard_b64encode(bytes_from_int(private_key.p)).decode('utf-8')
q_xml.text = base64.standard_b64encode(bytes_from_int(private_key.q)).decode('utf-8')
dp_xml.text = base64.standard_b64encode(bytes_from_int(private_key.exp1)).decode('utf-8')
dq_xml.text = base64.standard_b64encode(bytes_from_int(private_key.exp2)).decode('utf-8')
inverseq_xml.text = base64.standard_b64encode(bytes_from_int(private_key.coef)).decode('utf-8')
privkey = ET.tostring(rsa_key_value_xml).decode('utf-8')
return pubkey, privkey
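# Usage sketch (illustrative; the file name 'private.xml' is an assumption, and
# 'SHA-256' is one of the hash names understood by rsa.sign()/rsa.verify()):
if __name__ == '__main__':
    keypair = RSA(key_size=2048)             # generate a fresh key pair
    keypair.save_keys_xml('private.xml')     # persist as <RSAKeyValue> XML
    signature = keypair.sign(b'payload', 'SHA-256')
    keypair.verify(b'payload', signature)    # raises rsa.VerificationError on mismatch
    loaded = RSA()
    loaded.load_keys_xml('private.xml')      # rebuild both keys from the XML file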
| cc0-1.0 | -5,861,631,085,719,822,000 | 40.978947 | 113 | 0.650201 | false |
mice-software/maus | bin/scifi/GenerateMomentumCorrections.py | 1 | 2173 | #!/usr/bin/env python
"""
Generate an MC data file and calculate the required Pattern Recognition
momentum corrections required for the track reconstruction.
This will simulate MICE spills through the entirety of MICE using Geant4, then
digitize and reconstruct tracker hits to space points. Finally a
reducer is used to analyse the MC truth and reconstructed tracks in order to
calculate the required corrections.
"""
import os
import MAUS # MAUS libraries
# pylint: disable = C0103
config_file = os.path.join(os.getenv("MAUS_ROOT_DIR"),
"bin/scifi/Conf_PR_Momentum_Corrections.py")
def run():
""" Run the macro
"""
# This input generates empty spills, to be filled by the beam maker later on
my_input = MAUS.InputPySpillGenerator()
# Create an empty array of mappers, then populate it
# with the functionality you want to use.
my_map = MAUS.MapPyGroup()
# GEANT4
my_map.append(MAUS.MapPyBeamMaker()) # beam construction
my_map.append(MAUS.MapCppSimulation()) # geant4 simulation
# Pre detector set up
# my_map.append(MAUS.MapPyMCReconSetup()) # geant4 simulation
my_map.append(MAUS.MapCppMCReconSetup()) # geant4 simulation
# SciFi
my_map.append(MAUS.MapCppTrackerMCDigitization()) # SciFi electronics model
my_map.append(MAUS.MapCppTrackerClusterRecon()) # SciFi channel clustering
my_map.append(MAUS.MapCppTrackerSpacePointRecon()) # SciFi spacepoint recon
my_map.append(MAUS.MapCppTrackerPatternRecognition()) # SciFi track finding
# my_map.append(MAUS.MapCppTrackerTrackFit()) # SciFi track fit
# Momentum Corrections Reducer
my_reduce = MAUS.ReduceCppSciFiMomentumCorrections()
# Then construct a MAUS output component - filename comes from datacards
my_output = MAUS.OutputCppRoot()
# can specify datacards here or by using appropriate command line calls
datacards = open(config_file, 'r')
# The Go() drives all the components you pass in, then check the file
# (default simulation.out) for output
MAUS.Go(my_input, my_map, my_reduce, my_output, datacards)
if __name__ == '__main__':
run()
| gpl-3.0 | 8,634,506,744,995,396,000 | 34.622951 | 80 | 0.716521 | false |
AhmedFat7y/configure-c--projects-for-ssi | main.py | 1 | 7785 | import xml.etree.ElementTree as ET
from os.path import isfile, join, abspath, dirname, lexists
from os import listdir
from sys import argv
import argparse
import pdb, getopt
class ProjectConfigurator:
#SSI_PLUGIN_SOURCE_DIRECTORY = "build\\%s"
def __init__(self, argv):
self.XML_TAG_SUFFIX = "%s"
self.INPUT_FILE = ""
self.SSI_PLUGIN_NAME = ""
self.SSI_DIR_PREFIX = "%s"
self.SSI_CURRENT_PLUGIN_PREFIX = '%s'
self.SSI_INCLUDE_DIRECTORY = 'core\\include\\;'
self.SSI_LIB_DIRECTORY = 'libs\\Win32\\vc10\\;'
self.SSI_BIN_DIRECTORY = 'bin\\Win32\\vc10\\'
self.SSI_PLUGINS_DIRECTORY = 'plugins\\'
self.SSI_PLUGINS_INCLUDE_DIRECTORY = 'plugins\\%s\\include\\;'
self.SSI_PLUGIN_SOURCE_DIRECTORY = "source\\%s"
self.SSI_PLUGIN_INCLUDE_DIRECTORY = "include\\%s"
self.parseCommandLineOptions()
self.initializeRelativePaths()
def parseCommandLineOptions(self):
parser = argparse.ArgumentParser(description='Configure c++ project for ssi framework.')
parser.add_argument('iFile', metavar='input-file',
help='The project file to be edited.')
parser.add_argument('-p', '--inlculde-plugins', nargs="*", dest="included_plugins",
help='Name of plugins to add their include folders to the current project')
parser.add_argument('-l', '--additional-deps', nargs="*", dest="additional_dependencies",
help='Additional dependencies (ex: xsens.lib)')
parser.add_argument('-d', '--additional-libs-dirs', nargs="*", dest="additional_libs_directries",
help='Starting with a plugin name specify the folder to additiona libraries directories (ex: xsens\\build\\bin\\)')
args = parser.parse_args()
self.INPUT_FILE = args.iFile
#pdb._trace()
def editItemGroup(self, rootnode):
maincppfile_name = (self.SSI_CURRENT_PLUGIN_PREFIX % self.SSI_PLUGIN_SOURCE_DIRECTORY) % (self.SSI_PLUGIN_NAME + ".cpp")
exportcppfile_name = (self.SSI_CURRENT_PLUGIN_PREFIX % self.SSI_PLUGIN_SOURCE_DIRECTORY) % ("Export" + self.SSI_PLUGIN_NAME + ".cpp")
mainheaderfile_name = (self.SSI_CURRENT_PLUGIN_PREFIX % self.SSI_PLUGIN_INCLUDE_DIRECTORY) % (self.SSI_PLUGIN_NAME + ".h")
ssiheaderfile_name = (self.SSI_CURRENT_PLUGIN_PREFIX % self.SSI_PLUGIN_INCLUDE_DIRECTORY) % ("ssi" + self.SSI_PLUGIN_NAME.lower() + ".h")
"""
cppfilesGroup = createNewElement(rootnode, "ItemGroup")
findChild(cppfilesGroup, "ClCompile").set("Include", maincppfile_name)
findChild(cppfilesGroup, "ClCompile").set("Include", exportcppfile_name)
headerfilesGroup = createNewElement(rootnode, "ItemGroup")
findChild(headerfilesGroup, "ClCompile").set("Include", mainheaderfile_name)
findChild(headerfilesGroup, "ClCompile").set("Include", ssiheaderfile_name)
"""
files_names = (maincppfile_name, exportcppfile_name, mainheaderfile_name, ssiheaderfile_name)
for file_name in files_names:
if not lexists(file_name):
with open(file_name, 'w') as outfile:
outfile.write("")
outfile.flush()
pass
def writetree(self, tree, inputfile):
treecontent = ET.tostring(tree.getroot()).replace("ns0:", "").replace(":ns0", "")
with open(inputfile , 'w') as out_file:
out_file.write(treecontent)
out_file.flush()
#tree.write(INPUT_FILE + ".xml")
pass
def createNewElement(self, parentnode, newelement_name):
newelement = ET.SubElement(parentnode, self.XML_TAG_SUFFIX % newelement_name)
return newelement
pass
def findChild(self, parentnode, childnode_name):
if parentnode.find(self.XML_TAG_SUFFIX % childnode_name) is None:
self.createNewElement(parentnode, childnode_name)
return parentnode.find(self.XML_TAG_SUFFIX % childnode_name)
pass
def editDebugProperties(self, properties_node):
#pdb.set_trace()
self.findChild(properties_node, "TargetName").text = "ssi$(ProjectName)d"
self.findChild(properties_node, "OutDir").text = self.SSI_DIR_PREFIX % self.SSI_BIN_DIRECTORY
pass
def editReleaseProperties(self, properties_node):
self.findChild(properties_node, "TargetName").text = "ssi$(ProjectName)"
self.findChild(properties_node, "OutDir").text = self.SSI_DIR_PREFIX % self.SSI_BIN_DIRECTORY
pass
def editReleaseLink(self, link_node):
self.findChild(link_node, "AdditionalLibraryDirectories").text = (self.SSI_DIR_PREFIX % self.SSI_LIB_DIRECTORY) + (self.SSI_DIR_PREFIX % self.SSI_BIN_DIRECTORY) + ";"
self.findChild(link_node, "AdditionalDependencies").text = ""
self.findChild(link_node, "OutputFile").text = "$(TargetPath)"
pass
def editReleaseClCompile(self, clcompile_node):
self.findChild(clcompile_node, "AdditionalIncludeDirectories").text = (self.SSI_DIR_PREFIX % self.SSI_INCLUDE_DIRECTORY) + (self.SSI_CURRENT_PLUGIN_PREFIX % (self.SSI_PLUGIN_INCLUDE_DIRECTORY % ""))
pass
def editDebugClCompile(self, clcompile_node):
self.findChild(clcompile_node, "AdditionalIncludeDirectories").text = (self.SSI_DIR_PREFIX % self.SSI_INCLUDE_DIRECTORY) + (self.SSI_CURRENT_PLUGIN_PREFIX % (self.SSI_PLUGIN_INCLUDE_DIRECTORY % ""))
pass
def editDebugLink(self, link_node):
self.findChild(link_node, "AdditionalLibraryDirectories").text = (self.SSI_DIR_PREFIX % self.SSI_LIB_DIRECTORY) + (self.SSI_DIR_PREFIX % self.SSI_BIN_DIRECTORY) + ";"
self.findChild(link_node, "AdditionalDependencies").text = ""
self.findChild(link_node, "OutputFile").text = "$(TargetPath)"
pass
def initializeRelativePaths(self):
current_dir = abspath('')
new_dir = self.SSI_DIR_PREFIX
current_plugin_dir = self.SSI_CURRENT_PLUGIN_PREFIX
dir_files = listdir(current_dir)
while current_dir[-3:] != "ssi":
current_dir = dirname(current_dir)
new_dir = '..\\' + new_dir
if not (dir_files.count("include") == 1 and dir_files.count("source") == 1 and dir_files.count("build") == 1):
current_plugin_dir = '..\\' + current_plugin_dir
dir_files = listdir(current_dir)
print "- ", dir_files
self.SSI_DIR_PREFIX = new_dir
self.SSI_CURRENT_PLUGIN_PREFIX = current_plugin_dir
print "- ", self.INPUT_FILE, "- ", self.INPUT_FILE.rfind(".vcxproj")
self.SSI_PLUGIN_NAME = self.INPUT_FILE[0:self.INPUT_FILE.rfind(".vcxproj")]
def start(self):
tree = ET.parse(self.INPUT_FILE)
root = tree.getroot()
roottag = root.tag
self.XML_TAG_SUFFIX = roottag[roottag.find("{") : roottag.find("}") + 1] + self.XML_TAG_SUFFIX
for child in root:
if child.tag == self.XML_TAG_SUFFIX % "PropertyGroup" and not child.attrib.has_key("Label") and child.attrib.has_key("Condition"):
if child.attrib["Condition"] == "'$(Configuration)|$(Platform)'=='Debug|Win32'":
self.editDebugProperties(child)
elif child.attrib["Condition"] == "'$(Configuration)|$(Platform)'=='Release|Win32'":
self.editReleaseProperties(child)
else:
pass
elif child.tag == self.XML_TAG_SUFFIX % "ItemDefinitionGroup" and child.attrib.has_key("Condition"):
compile_child = child.find(self.XML_TAG_SUFFIX % "ClCompile")
link_child = child.find(self.XML_TAG_SUFFIX % "Link")
if child.attrib["Condition"] == "'$(Configuration)|$(Platform)'=='Release|Win32'":
self.editReleaseClCompile(compile_child)
self.editReleaseLink(link_child)
else:
self.editReleaseClCompile(compile_child)
self.editDebugLink(link_child)
else:
pass
self.editItemGroup(root)
#print '- ', ET.tostringlist(root)
self.writetree(tree, self.INPUT_FILE)
if __name__ == "__main__":
instance = ProjectConfigurator(argv)
    instance.start()
| gpl-2.0 | -5,115,269,590,821,743,000 | 48.278481 | 202 | 0.669878 | false |
Savvysherpa/provenance | provenance/migrations/env.py | 1 | 2032 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from provenance import models
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
connectable = config.attributes.get('connection', None)
if connectable is None:
# only create Engine if we don't have a Connection
# from the outside
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
# when connectable is already a Connection object, calling
# connect() gives us a *branched connection*.
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| mit | -6,248,009,298,735,235,000 | 28.449275 | 69 | 0.692913 | false |
novapost/python-pussycache | setup.py | 1 | 2924 | # -*- coding: utf-8 -*-
"""Python packaging."""
from os.path import abspath, dirname, join
from setuptools import setup
def read_relative_file(filename):
"""Returns contents of the given file, which path is supposed relative
to this module."""
with open(join(dirname(abspath(__file__)), filename)) as f:
return f.read()
def packages(project_name):
"""Return list of packages distributed by project based on its name.
>>> packages('foo')
['foo']
>>> packages('foo.bar')
['foo', 'foo.bar']
>>> packages('foo.bar.baz')
['foo', 'foo.bar', 'foo.bar.baz']
>>> packages('FooBar')
['foobar']
Implements "Use a single name" convention described in :pep:`423`.
"""
name = str(project_name).lower()
if '.' in name: # Using namespace packages.
parts = name.split('.')
return ['.'.join(parts[0:i]) for i in range(1, len(parts) + 1)]
else: # One root package or module.
return [name]
def namespace_packages(project_name):
"""Return list of namespace packages distributed in this project, based on
project name.
>>> namespace_packages('foo')
[]
>>> namespace_packages('foo.bar')
['foo']
>>> namespace_packages('foo.bar.baz')
['foo', 'foo.bar']
>>> namespace_packages('Foo.BaR.BAZ') == namespace_packages('foo.bar.baz')
True
Implements "Use a single name" convention described in :pep:`423`.
"""
package_list = packages(project_name)
package_list.pop() # Ignore last element.
# Remaining packages are supposed to be namespace packages.
return package_list
NAME = 'pussycache'
version = read_relative_file('VERSION').strip()
readme = read_relative_file('README.md')
requirements = []
dependency_links = []
entry_points = {
}
if __name__ == '__main__': # ``import setup`` doesn't trigger setup().
setup(name=NAME,
version=version,
description="""Cache Backend system for python objects""",
long_description=readme,
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2.6',
'Framework :: Django',
],
keywords='cache',
author='Novapost Team',
author_email='[email protected]',
url='https://github.com/novapost/%s' % NAME,
license='BSD',
packages=packages(NAME),
namespace_packages=namespace_packages(NAME),
include_package_data=True,
zip_safe=False,
install_requires=requirements,
dependency_links=dependency_links,
entry_points=entry_points,
test_suite='nose.collector',
setup_requires=['nose'],
tests_require=['redis', 'django'])
| mit | -3,581,475,447,714,496,500 | 29.778947 | 78 | 0.588577 | false |
jokey2k/pyClanSphere | pyClanSphere/plugins/bulletin_board/database.py | 1 | 3321 | # -*- coding: utf-8 -*-
"""
pyClanSphere.plugins.bulletin_board.database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Our needed tables are declared here (now)
:copyright: (c) 2009 - 2010 by the pyClanSphere Team,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from pyClanSphere.database import db, metadata
# Mapping these out from db module to increases readability further down
for var in ['Table', 'Column', 'String', 'Integer', 'Boolean', 'DateTime', 'ForeignKey', 'Text']:
globals()[var] = getattr(db,var)
board_categories = Table('board_categories', metadata,
Column('category_id', Integer, primary_key=True),
Column('name', String(50)),
Column('ordering', Integer)
)
board_forums = Table('board_forums', metadata,
Column('forum_id', Integer, primary_key=True),
Column('category_id', ForeignKey('board_categories.category_id')),
Column('name', String(50)),
Column('description', String(255)),
Column('ordering', Integer),
Column('allow_anonymous', Boolean),
Column('is_public', Boolean),
Column('topiccount', Integer),
Column('postcount', Integer),
Column('modification_date', DateTime),
Column('lasttopic_id', Integer, ForeignKey('board_topics.topic_id', name="forum_lasttopic", use_alter=True)),
Column('lastpost_id', Integer, ForeignKey('board_posts.post_id', name="forum_lastpost", use_alter=True))
)
board_topics = Table('board_topics', metadata,
Column('topic_id', Integer, primary_key=True),
Column('forum_id', ForeignKey('board_forums.forum_id')),
Column('name', String(255)),
Column('date', DateTime, default=datetime.utcnow()),
Column('author_id', ForeignKey('users.user_id')),
Column('author_str', String(40)),
Column('is_sticky', Boolean),
Column('is_locked', Boolean),
Column('is_global', Boolean),
Column('is_solved', Boolean),
Column('is_external', Boolean),
Column('lastpost_id', Integer, ForeignKey('board_posts.post_id', name="topic_lastpost", use_alter=True)),
Column('postcount', Integer),
Column('modification_date', DateTime)
)
board_posts = Table('board_posts', metadata,
Column('post_id', Integer, primary_key=True),
Column('topic_id', ForeignKey('board_topics.topic_id')),
Column('text', Text),
Column('author_id', ForeignKey('users.user_id')),
Column('author_str', String(40)),
Column('date', DateTime, default=datetime.utcnow()),
Column('ip', String(40)),
)
board_global_lastread = Table('board_global_lastread', metadata,
Column('user_id', ForeignKey('users.user_id'), primary_key=True),
Column('date', DateTime, default=datetime.utcnow())
)
board_local_lastread = Table('board_local_lastread', metadata,
Column('user_id', ForeignKey('users.user_id'), primary_key=True),
Column('topic_id', ForeignKey('board_topics.topic_id'), primary_key=True),
Column('date', DateTime, default=datetime.utcnow())
)
def init_database(app):
""" This is for inserting our new table"""
engine = app.database_engine
metadata.create_all(engine)
__all__ = ['board_categories', 'board_forums', 'board_topics', 'board_posts',
'board_local_lastread', 'board_global_lastread']
| bsd-3-clause | -5,117,481,362,407,669,000 | 37.616279 | 113 | 0.662752 | false |
wrohdewald/Gpxity | gpxity/accounts.py | 1 | 12247 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Wolfgang Rohdewald <[email protected]>
# See LICENSE for details.
# The source in this file is inspired by and partially identical with paramiko.config
"""Configuration file for accounts in Backends."""
import os
import re
import copy
import tempfile
from gpxpy.geo import Location
__all__ = ['Fences', 'Account', 'DirectoryAccount', 'MemoryAccount']
class Fences: # pylint: disable=too-few-public-methods
"""
Defines circles.
Args:
config_str: The string from the accounts file
Attributes:
        string (str): The definition string as given in the accounts file
        circles (list): The fences as (center, radius) tuples, radius in meters
"""
def __init__(self, config_str: str):
"""init."""
self.string = config_str or 'None'
self.circles = list()
if config_str is not None:
for fence in config_str.split(' '):
parts = fence.split('/')
if len(parts) != 3:
raise ValueError('fence needs 3 parts: {}'.format(fence))
try:
parts = [x.strip() for x in parts]
center = Location(float(parts[0]), float(parts[1]))
radius = float(parts[2])
except Exception:
raise ValueError('Fence definition is wrong: {}'.format(fence))
circle = (center, radius)
self.circles.append(circle)
def outside(self, point) ->bool:
"""Determine if point is outside of all fences.
Returns: True or False.
"""
return all(point.distance_2d(x[0]) > x[1] for x in self.circles)
def __str__(self): # noqa
return self.string
def __repr__(self): # noqa
return 'Fences({})'.format(str(self))
def __bool__(self):
"""True if we actually fence.
Returns: Result
"""
return bool(self.circles)
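# Usage sketch (illustrative): a fence definition is a space-separated list of
# circles, each written as "lat/lon/radius_in_meters", e.g.
#
#   fences = Fences('53.7505/10.7445/750 48.137/11.575/500')
#   fences.outside(point)   # True only if `point` lies outside *all* circles
#
# where `point` is any gpxpy-style point offering distance_2d().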
class Accounts:
"""Representation of config information as stored in the format used by Gpxity.
Queries can be made via `lookup`. The keyword :literal:`Account` only allows one name.
Keywords are case insensitive, arguments are not.
Example for an entry in the accounts file:
::
Account wp
Backend WPTrackserver
Username wordpress_username
Url localhost
Mysql wordpress_7@wordpress_7
Password xxxx
            Fences 53.7505/10.7445/750
"""
# pylint: disable=too-few-public-methods
__SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')
__account_files = dict()
@classmethod
def __parse(cls, path):
"""Parse an accounts file."""
if path not in cls.__account_files:
if not os.path.exists(path):
return
cls.__account_files[path] = cls.__parse_accounts(path)
@classmethod
def __parse_accounts(cls, filename):
"""Parse all accounts from filename.
        Returns: dict with all accounts found in filename
"""
result = dict()
with open(filename) as file_obj:
for _ in cls.__yield_accounts(file_obj):
result[_['name']] = _
return result
@staticmethod
def __strip_whitespace(file_obj):
"""Filter out comments, strip lines."""
for line in file_obj:
line = line.strip()
if line and not line.startswith('#'):
yield line
@classmethod
def __yield_matches(cls, file_obj):
"""Yield usable lines."""
for line in cls.__strip_whitespace(file_obj):
match = re.match(cls.__SETTINGS_REGEX, line)
if not match:
raise Exception('Unparsable line {}'.format(line))
yield match
@classmethod
def __yield_accounts(cls, file_obj):
"""Generate all accounts."""
account = {'name': 'global'}
for match in cls.__yield_matches(file_obj):
key = match.group(1).lower()
value = match.group(2)
if key == 'account':
if account is not None:
yield account
account = {'name': value.lower()}
continue
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
if key not in account:
account[key] = value
if account is not None:
yield account
@classmethod
def lookup(cls, filename: str, wanted_account: str):
"""
Build an :class:`~gpxity.accounts.Account`.
Args:
filename: The name of the accounts file
wanted_account: The name to look for in the accounts file
Returns: dict
"""
cls.__parse(filename)
return copy.deepcopy(cls.__account_files[filename][wanted_account.lower()])
class Account:
"""As parsed from the accounts file.
Attributes can be referenced as account.xxxx where xxx is an arbitrary
value in the account definition from the accounts file.
Args:
name: The name of the account. Must exist in the accounts file.
filename: Name of the accounts file. Default is Account.path
kwargs: Additional parameters added to the account. They have precedence.
If both name and file are None, only :literal:`**kwargs` are used.
Attributes:
path: Default value for the accounts file
name: The name of the account
config: A dict with all config values
backend: The name of the backend class
fences: The backend will never write points within fences.
You can define any number of fences separated by spaces. Every fence is a circle.
It has the form Lat/Long/meter.
Lat and Long are the center position in decimal degrees, meter is the radius.
"""
path = '~/.config/Gpxity/accounts'
def __init__(self, name=None, filename=None, **kwargs):
"""Create an Account."""
if name is None:
self.config = dict()
for key, value in kwargs.items():
self.config[key.lower()] = value
self.name = self.url or '.'
if not self.backend:
self.config['backend'] = 'Directory'
self._resolve_fences()
return
self.name = name
path = os.path.expanduser(filename or Account.path)
lookup_name = name.split(':')[0]
self.config = Accounts.lookup(path, lookup_name)
if not self.backend:
raise Exception('Account({}, {}, {}) defines no Backend'.format(name, filename, kwargs))
for key, value in kwargs.items():
self.config[key.lower()] = value
self._resolve_fences()
def _resolve_fences(self):
"""create self.fences as a Fences instance."""
if 'fences' in self.config:
_ = Fences(self.config['fences'])
del self.config['fences']
self.fences = _
else:
self.fences = Fences(None)
def __getattr__(self, key):
"""Only called if key is not an existing attribute.
Returns: The value or None
"""
try:
config = object.__getattribute__(self, 'config')
except AttributeError:
return None
return config.get(key.lower())
def __repr__(self):
"""For debugging output.
Returns: the str
"""
        result = 'Account({}): backend={}'.format(self.name, self.backend)
        if 'url' in self.config:
            result += ' url={}'.format(self.url)
        if 'username' in self.config:
            result += ' username={}'.format(self.username)
        return result
def __str__(self):
"""The account in a parseable form.
Returns: The string
"""
return self.name + ':'
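# Usage sketch (illustrative, hypothetical values): an Account is either looked
# up by name in the accounts file,
#
#   acc = Account('wp')          # reads ~/.config/Gpxity/accounts
#
# or built ad hoc from keyword arguments only,
#
#   acc = Account(backend='WPTrackserver', username='wp_user', url='localhost')
#
# Unresolved attribute lookups fall through to the config dict, so acc.username
# and acc.url simply return the configured values (or None).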
class DirectoryAccount(Account):
"""This will not use an acocunts file but the optional file :literal:`.config`.
Args:
url: The name of the directory. If it does not exist, create it.
"" will translate into ".".
A trailing "/" will raise an Exception.
None will create a temporary directory.
kwargs: Additional parameters added to the account. They have precedence.
Attributes:
path: Default value for the accounts file
name: The name of the account
config: A dict with all config values
backend: The name of the backend class
is_temporary: True for temporary directories.
fences: The backend will never write points within fences.
You can define any number of fences separated by spaces. Every fence is a circle.
It has the form Lat/Long/meter.
Lat and Long are the center position in decimal degrees, meter is the radius.
prefix (str): Class attribute, may be changed. The default prefix for
temporary directories. Default value is :literal:`gpxity.`
"""
path = None
prefix = 'gpxity.'
def __init__(self, url=None, **kwargs): # pylint: disable=super-init-not-called
"""Create an Account."""
self.is_temporary = url is None
if self.is_temporary:
url = tempfile.mkdtemp(prefix=self.__class__.prefix)
if url == '':
url = '.'
if url == '/':
raise Exception('Directory / is not allowed')
if url.endswith('/') and url != '/':
raise Exception('DirectoryAccount: url {} must not end with /'.format(url))
self.config = dict()
if not os.path.exists(url):
os.makedirs(url)
path_parts = os.path.abspath(url).split('/') # TODO: should use os.path.separator
for _ in range(1, len(path_parts) + 1):
parts = path_parts[:_]
dirname = os.path.join(*parts)
config_name = '/' + os.path.join(dirname, '.gpxity_config')
if os.path.exists(config_name):
self.config.update(Accounts.lookup(config_name, 'global'))
self.config['backend'] = 'Directory'
self.config['url'] = url
for key, value in kwargs.items():
self.config[key.lower()] = value
self.name = url
self._resolve_fences()
def __repr__(self):
"""For debugging output.
Returns: the str
"""
return 'DirectoryAccount({})'.format(self.name)
def __str__(self):
"""The account in a parseable form.
Returns: The string
"""
if self.name == '.':
return ''
if self.name == '/':
return '/'
return self.name + '/'
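# Usage sketch (illustrative): DirectoryAccount wraps a local directory backend.
#
#   acc = DirectoryAccount('/tmp/tracks')    # creates the directory if missing
#   tmp = DirectoryAccount()                 # temporary dir with prefix 'gpxity.'
#   tmp.is_temporary                         # -> True
#
# Any .gpxity_config files found along the directory's absolute path are merged
# into acc.config.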
class MemoryAccount(Account):
"""This will only use kwargs for configuration.
Args:
kwargs: Additional parameters added to the account. They have precedence.
Attributes:
name: The name of the account
config: A dict with all config values
backend: The name of the backend class
        fences: The backend will never write points within fences.
            You can define any number of fences separated by spaces. Every fence is a circle.
            It has the form Lat/Long/meter.
            Lat and Long are the center position in decimal degrees, meter is the radius.
    """
# pylint: disable=too-few-public-methods
counter = 0
def __init__(self, name=None, **kwargs): # pylint: disable=super-init-not-called
"""Create an Account."""
self.config = dict()
self.config['backend'] = 'Memory'
for key, value in kwargs.items():
self.config[key.lower()] = value
if name is None:
name = 'in_memory_{}'.format(MemoryAccount.counter)
MemoryAccount.counter += 1
self.name = name
self._resolve_fences()
def __repr__(self):
"""For debugging output.
Returns: the str
"""
return 'MemoryAccount({})'.format(self.name) + ':'
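# Usage sketch (illustrative): MemoryAccount needs neither file nor directory,
# which makes it convenient for tests.  Unnamed instances are numbered
# automatically:
#
#   acc = MemoryAccount()                            # e.g. name 'in_memory_0'
#   acc2 = MemoryAccount('scratch', fences='53.75/10.74/500')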
| gpl-2.0 | -6,233,281,037,619,963,000 | 30.16285 | 100 | 0.570344 | false |
mahim97/zulip | zerver/tests/test_outgoing_webhook_interfaces.py | 8 | 4682 | # -*- coding: utf-8 -*-
from typing import Any
import mock
import json
from requests.models import Response
from zerver.lib.outgoing_webhook import GenericOutgoingWebhookService, \
SlackOutgoingWebhookService
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Service
class TestGenericOutgoingWebhookService(ZulipTestCase):
def setUp(self) -> None:
self.event = {
u'command': '@**test**',
u'message': {
'content': 'test_content',
}
}
self.handler = GenericOutgoingWebhookService(service_name='test-service',
base_url='http://example.domain.com',
token='abcdef',
user_profile=None)
def test_process_event(self) -> None:
rest_operation, request_data = self.handler.process_event(self.event)
request_data = json.loads(request_data)
self.assertEqual(request_data['data'], "@**test**")
self.assertEqual(request_data['token'], "abcdef")
self.assertEqual(rest_operation['base_url'], "http://example.domain.com")
self.assertEqual(rest_operation['method'], "POST")
self.assertEqual(request_data['message'], self.event['message'])
def test_process_success(self) -> None:
response = mock.Mock(spec=Response)
response.text = json.dumps({"response_not_required": True})
success_response = self.handler.process_success(response, self.event)
self.assertEqual(success_response, None)
response.text = json.dumps({"response_string": 'test_content'})
success_response = self.handler.process_success(response, self.event)
self.assertEqual(success_response, 'test_content')
response.text = json.dumps({})
success_response = self.handler.process_success(response, self.event)
self.assertEqual(success_response, None)
mock_service = Service()
class TestSlackOutgoingWebhookService(ZulipTestCase):
def setUp(self) -> None:
self.event = {
u'command': '@**test**',
u'user_profile_id': 12,
u'service_name': 'test-service',
u'trigger': 'mention',
u'message': {
'content': 'test_content',
'type': 'stream',
'sender_realm_str': 'zulip',
'sender_email': '[email protected]',
'stream_id': '123',
'display_recipient': 'integrations',
'timestamp': 123456,
'sender_id': 21,
'sender_full_name': 'Sample User',
}
}
self.handler = SlackOutgoingWebhookService(base_url='http://example.domain.com',
token="abcdef",
user_profile=None,
service_name='test-service')
@mock.patch('zerver.lib.outgoing_webhook.get_service_profile', return_value=mock_service)
def test_process_event(self, mock_get_service_profile: mock.Mock) -> None:
rest_operation, request_data = self.handler.process_event(self.event)
self.assertEqual(rest_operation['base_url'], 'http://example.domain.com')
self.assertEqual(rest_operation['method'], 'POST')
self.assertEqual(request_data[0][1], "abcdef") # token
self.assertEqual(request_data[1][1], "zulip") # team_id
self.assertEqual(request_data[2][1], "zulip.com") # team_domain
self.assertEqual(request_data[3][1], "123") # channel_id
self.assertEqual(request_data[4][1], "integrations") # channel_name
self.assertEqual(request_data[5][1], 123456) # timestamp
self.assertEqual(request_data[6][1], 21) # user_id
self.assertEqual(request_data[7][1], "Sample User") # user_name
self.assertEqual(request_data[8][1], "@**test**") # text
self.assertEqual(request_data[9][1], "mention") # trigger_word
self.assertEqual(request_data[10][1], mock_service.id) # service_id
def test_process_success(self) -> None:
response = mock.Mock(spec=Response)
response.text = json.dumps({"response_not_required": True})
success_response = self.handler.process_success(response, self.event)
self.assertEqual(success_response, None)
response.text = json.dumps({"text": 'test_content'})
success_response = self.handler.process_success(response, self.event)
self.assertEqual(success_response, 'test_content')
| apache-2.0 | 5,945,918,141,294,065,000 | 44.456311 | 93 | 0.590773 | false |
franek/weboob | weboob/capabilities/gauge.py | 1 | 3325 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2012 Romain Bignon, Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import IBaseCap, CapBaseObject, StringField, FloatField, DateField, Field, UserError
__all__ = ['Gauge', 'GaugeSensor', 'GaugeMeasure', 'ICapGauge', 'SensorNotFound']
class SensorNotFound(UserError):
"""
    Raised when a sensor is not found.
"""
class Gauge(CapBaseObject):
"""
Gauge class.
"""
name = StringField('Name of gauge')
city = StringField('City of the gauge')
    object = StringField('What is evaluated') # For example, name of a river
sensors = Field('List of sensors on the gauge', list)
class GaugeMeasure(CapBaseObject):
"""
Measure of a gauge sensor.
"""
level = FloatField('Level of measure')
date = DateField('Date of measure')
alarm = StringField('Alarm level')
def __init__(self):
CapBaseObject.__init__(self, None)
def __repr__(self):
if self.level:
return "<GaugeMeasure level=%f alarm=%s>" % (self.level, self.alarm)
else:
return "<GaugeMeasure NotAvailable>"
class GaugeSensor(CapBaseObject):
"""
GaugeSensor class.
"""
name = StringField('Name of the sensor')
unit = StringField('Unit of values')
forecast = StringField('Forecast')
lastvalue = Field('Last value', GaugeMeasure)
history = Field('Value history', list) # lastvalue not included
gaugeid = StringField('Id of the gauge')
def __repr__(self):
return "<GaugeSensor id=%s name=%s>" % (self.id, self.name)
class ICapGauge(IBaseCap):
def iter_gauges(self, pattern=None):
"""
Iter gauges.
:param pattern: if specified, used to search gauges.
:type pattern: str
:rtype: iter[:class:`Gauge`]
"""
raise NotImplementedError()
def iter_sensors(self, id, pattern=None):
"""
        Iter sensors of a gauge.
        :param id: ID of the gauge
:param pattern: if specified, used to search sensors.
:type pattern: str
:rtype: iter[:class:`GaugeSensor`]
"""
raise NotImplementedError()
def iter_gauge_history(self, id):
"""
Get history of a gauge sensor.
:param id: ID of the gauge sensor
:type id: str
:rtype: iter[:class:`GaugeMeasure`]
"""
raise NotImplementedError()
def get_last_measure(self, id):
"""
        Get the last measure of a sensor.
        :param id: ID of the sensor.
:type id: str
:rtype: :class:`GaugeMeasure`
"""
raise NotImplementedError()
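# Usage sketch (illustrative; `backend` stands for a hypothetical ICapGauge
# implementation):
#
#   for gauge in backend.iter_gauges(u'Seine'):
#       for sensor in backend.iter_sensors(gauge.id):
#           measure = backend.get_last_measure(sensor.id)
#           print('%s %s %s' % (sensor.name, measure.level, sensor.unit))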
| agpl-3.0 | 1,626,210,802,505,838,600 | 27.663793 | 95 | 0.623459 | false |
Centurion89/ModLogin | modules/Square.py | 1 | 2724 | import requests
import json
from lxml import html
from BaseModule import BaseModule
class Square(BaseModule):
def login(self, username, password, useragent):
useragent = BaseModule().define_user_agent(useragent)
headers = {'user-agent': useragent}
session = requests.Session()
login_page = session.get(
'https://www.squareup.com/login',
headers=headers)
login_page_html = html.fromstring(login_page.content)
# Load up CSRF token from cookies
csrf_token = session.cookies["_js_csrf"]
# Set POST payload
payload = {'email': username, 'password': password}
headers = {
'User-Agent': useragent,
'Host': 'api.squareup.com',
'Content-Type': 'application/json',
'Origin': 'https://squareup.com',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8',
'Referer': 'https://squareup.com/login',
'X-Csrf-Token': csrf_token
}
login_attempt = session.post(
'https://api.squareup.com/mp/login',
data=json.dumps(payload),
headers=headers, cookies=session.cookies, allow_redirects=False
)
auth_results = login_attempt.json()
# If API returns 200 and JSON key with "trusted_devices", login was
# successful
if (login_attempt.status_code == 200 and
'trusted_device' in auth_results):
return {
'module': self.__class__.__name__,
'auth_result': 'SUCCESS',
'display_name': '',
'display_handle': ''
}
# If JSON value contains error message, login failed
elif login_attempt.status_code == 401 or 'error' in auth_results:
return {
'module': self.__class__.__name__,
'auth_result': 'FAILED',
'display_name': '',
'display_handle': ''
}
else:
# If none of the above occur, must be unknown issue
# Output a copy of the HTML that was returned for debugging
debug_filename = str(self.__class__.__name__) + \
"_" + username + "_debug.html"
with open("./debug/" + debug_filename, "a+") as f:
f.write(json.dumps(auth_results))
return {
'module': self.__class__.__name__,
'auth_result': 'ERROR',
'display_name': '',
'display_handle': ''
}
square = Square()
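# Usage sketch (illustrative, hypothetical credentials; user-agent handling is
# delegated to BaseModule, which is not shown here):
#
#   result = square.login('[email protected]', 'hunter2', None)
#   result['auth_result']   # 'SUCCESS', 'FAILED' or 'ERROR'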
| mit | 6,636,884,052,703,252,000 | 35.810811 | 75 | 0.517254 | false |
discos/basie | src/configobj/configobj.py | 1 | 87684 | # configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
import os
import re
import sys
import collections
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
# imported lazily to avoid startup performance hit if it isn't used
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf_16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
def match_utf8(encoding):
return BOM_LIST.get(encoding.lower()) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
wspace_plus = ' \r\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
__all__ = (
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'flatten_errors',
'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
def getObj(s):
global compiler
if compiler is None:
import compiler
s = "a=" + s
p = compiler.parse(s)
return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
pass
class Builder(object):
    def build(self, o):
        # Dispatch to the matching build_<NodeType> method, if any.
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)
def build_List(self, o):
return list(map(self.build, o.getChildren()))
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = next(i)
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
if o.name == 'None':
return None
if o.name == 'True':
return True
if o.name == 'False':
return False
# An undefined Name
raise UnknownType('Undefined Name')
def build_Add(self, o):
real, imag = list(map(self.build_Const, o.getChildren()))
try:
real = float(real)
except TypeError:
raise UnknownType('Add')
if not isinstance(imag, complex) or imag.real != 0.0:
raise UnknownType('Add')
return real+imag
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_UnarySub(self, o):
return -self.build_Const(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
if not s:
return s
# this is supposed to be safe
import ast
return ast.literal_eval(s)
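# Illustrative example: unrepr() round-trips Python literals from their string
# form, e.g. unrepr("['a', 1, {'b': 2}]") == ['a', 1, {'b': 2}]; ConfigObj uses
# it for values when the ``unrepr`` option is enabled.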
class ConfigObjError(SyntaxError):
"""
This is the base class for all errors that ConfigObj raises.
It is a subclass of SyntaxError.
"""
def __init__(self, message='', line_number=None, line=''):
self.line = line
self.line_number = line_number
SyntaxError.__init__(self, message)
class NestingError(ConfigObjError):
"""
This error indicates a level of nesting that doesn't match.
"""
class ParseError(ConfigObjError):
"""
This error indicates that a line is badly written.
It is neither a valid ``key = value`` line,
nor a valid section marker line.
"""
class ReloadError(IOError):
"""
A 'reload' operation failed.
This exception is a subclass of ``IOError``.
"""
def __init__(self):
IOError.__init__(self, 'reload failed, filename is not set.')
class DuplicateError(ConfigObjError):
"""
The keyword or section specified already exists.
"""
class ConfigspecError(ConfigObjError):
"""
    An error occurred whilst parsing a configspec.
"""
class InterpolationError(ConfigObjError):
"""Base class for the two interpolation errors."""
class InterpolationLoopError(InterpolationError):
"""Maximum interpolation depth exceeded in string interpolation."""
def __init__(self, option):
InterpolationError.__init__(
self,
'interpolation loop detected in value "%s".' % option)
class RepeatSectionError(ConfigObjError):
"""
This error indicates additional sections in a section with a
``__many__`` (repeated) section.
"""
class MissingInterpolationOption(InterpolationError):
"""A value specified for interpolation was missing."""
def __init__(self, option):
msg = 'missing option "%s" in interpolation.' % option
InterpolationError.__init__(self, msg)
class UnreprError(ConfigObjError):
"""An error parsing in unrepr mode."""
class InterpolationEngine(object):
"""
A helper class to help perform string interpolation.
This class is an abstract base class; its descendants perform
the actual work.
"""
# compiled regexp to use in self.interpolate()
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
_cookie = '%'
def __init__(self, section):
# the Section instance that "owns" this engine
self.section = section
def interpolate(self, key, value):
# short-cut
if not self._cookie in value:
return value
def recursive_interpolate(key, value, section, backtrail):
"""The function that does the actual work.
``value``: the string we're trying to interpolate.
``section``: the section in which that string was found
``backtrail``: a dict to keep track of where we've been,
to detect and prevent infinite recursion loops
This is similar to a depth-first-search algorithm.
"""
# Have we been here already?
if (key, section.name) in backtrail:
# Yes - infinite loop detected
raise InterpolationLoopError(key)
# Place a marker on our backtrail so we won't come back here again
backtrail[(key, section.name)] = 1
# Now start the actual work
match = self._KEYCRE.search(value)
while match:
# The actual parsing of the match is implementation-dependent,
# so delegate to our helper function
k, v, s = self._parse_match(match)
if k is None:
# That's the signal that no further interpolation is needed
replacement = v
else:
# Further interpolation may be needed to obtain final value
replacement = recursive_interpolate(k, v, s, backtrail)
# Replace the matched string with its final value
start, end = match.span()
value = ''.join((value[:start], replacement, value[end:]))
new_search_start = start + len(replacement)
# Pick up the next interpolation key, if any, for next time
# through the while loop
match = self._KEYCRE.search(value, new_search_start)
# Now safe to come back here again; remove marker from backtrail
del backtrail[(key, section.name)]
return value
# Back in interpolate(), all we have to do is kick off the recursive
# function with appropriate starting values
value = recursive_interpolate(key, value, self.section, {})
return value
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
def _parse_match(self, match):
"""Implementation-dependent helper function.
Will be passed a match object corresponding to the interpolation
key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
key in the appropriate config file section (using the ``_fetch()``
helper function) and return a 3-tuple: (key, value, section)
``key`` is the name of the key we're looking for
``value`` is the value found for that key
``section`` is a reference to the section where it was found
``key`` and ``section`` should be None if no further
interpolation should be performed on the resulting value
(e.g., if we interpolated "$$" and returned "$").
"""
raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
"""Behaves like ConfigParser."""
_cookie = '%'
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
def _parse_match(self, match):
key = match.group(1)
value, section = self._fetch(key)
return key, value, section
class TemplateInterpolation(InterpolationEngine):
"""Behaves like string.Template."""
_cookie = '$'
_delimiter = '$'
_KEYCRE = re.compile(r"""
\$(?:
(?P<escaped>\$) | # Two $ signs
(?P<named>[_a-z][_a-z0-9]*) | # $name format
{(?P<braced>[^}]*)} # ${name} format
)
""", re.IGNORECASE | re.VERBOSE)
def _parse_match(self, match):
# Valid name (in or out of braces): fetch value from section
key = match.group('named') or match.group('braced')
if key is not None:
value, section = self._fetch(key)
return key, value, section
# Escaped delimiter (e.g., $$): return single delimiter
if match.group('escaped') is not None:
# Return None for key and section to indicate it's time to stop
return None, self._delimiter, None
# Anything else: ignore completely, just return it unchanged
return None, match.group(), None
interpolation_engines = {
'configparser': ConfigParserInterpolation,
'template': TemplateInterpolation,
}
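# Illustrative example: the ``interpolation`` option selects one of the engines
# above by name.  A minimal sketch:
#
#   cfg = ConfigObj(['home = /tmp', 'cache = %(home)s/cache'],
#                   interpolation='configparser')
#   cfg['cache']   # -> '/tmp/cache'
#
# With interpolation='template' the same value would be written as $home or
# ${home} instead.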
def __newobj__(cls, *args):
# Hack for pickle
return cls.__new__(cls, *args)
class Section(dict):
"""
A dictionary-like object that represents a section in a config file.
It does string interpolation if the 'interpolation' attribute
of the 'main' object is set to True.
Interpolation is tried first from this object, then from the 'DEFAULT'
section of this object, next from the parent and its 'DEFAULT' section,
and so on until the main object is reached.
A Section will behave like an ordered dictionary - following the
order of the ``scalars`` and ``sections`` attributes.
You can use this to change the order of members.
Iteration follows the order: scalars, then sections.
"""
def __setstate__(self, state):
dict.update(self, state[0])
self.__dict__.update(state[1])
def __reduce__(self):
state = (dict(self), self.__dict__)
return (__newobj__, (self.__class__,), state)
def __init__(self, parent, depth, main, indict=None, name=None):
"""
* parent is the section above
* depth is the depth level of this section
* main is the main ConfigObj
* indict is a dictionary to initialise the section with
"""
if indict is None:
indict = {}
dict.__init__(self)
# used for nesting level *and* interpolation
self.parent = parent
# used for the interpolation attribute
self.main = main
# level of nesting depth of this Section
self.depth = depth
# purely for information
self.name = name
#
self._initialise()
# we do this explicitly so that __setitem__ is used properly
# (rather than just passing to ``dict.__init__``)
for entry, value in indict.items():
self[entry] = value
def _initialise(self):
# the sequence of scalar values in this Section
self.scalars = []
# the sequence of sections in this Section
self.sections = []
# for comments :-)
self.comments = {}
self.inline_comments = {}
# the configspec
self.configspec = None
# for defaults
self.defaults = []
self.default_values = {}
self.extra_values = []
self._created = False
def _interpolate(self, key, value):
try:
# do we already have an interpolation engine?
engine = self._interpolation_engine
except AttributeError:
# not yet: first time running _interpolate(), so pick the engine
name = self.main.interpolation
if name == True: # note that "if name:" would be incorrect here
# backwards-compatibility: interpolation=True means use default
name = DEFAULT_INTERPOLATION
name = name.lower() # so that "Template", "template", etc. all work
class_ = interpolation_engines.get(name, None)
if class_ is None:
# invalid value for self.main.interpolation
self.main.interpolation = False
return value
else:
# save reference to engine so we don't have to do this again
engine = self._interpolation_engine = class_(self)
# let the engine do the actual work
return engine.interpolate(key, value)
def __getitem__(self, key):
"""Fetch the item and do string interpolation."""
val = dict.__getitem__(self, key)
if self.main.interpolation:
if isinstance(val, str):
return self._interpolate(key, val)
if isinstance(val, list):
def _check(entry):
if isinstance(entry, str):
return self._interpolate(key, entry)
return entry
new = [_check(entry) for entry in val]
if new != val:
return new
return val
def __setitem__(self, key, value, unrepr=False):
"""
Correctly set a value.
Making dictionary values Section instances.
(We have to special case 'Section' instances - which are also dicts)
Keys must be strings.
Values need only be strings (or lists of strings) if
``main.stringify`` is set.
``unrepr`` must be set when setting a value to a dictionary, without
creating a new sub-section.
"""
if not isinstance(key, str):
raise ValueError('The key "%s" is not a string.' % key)
# add the comment
if key not in self.comments:
self.comments[key] = []
self.inline_comments[key] = ''
# remove the entry from defaults
if key in self.defaults:
self.defaults.remove(key)
#
if isinstance(value, Section):
if key not in self:
self.sections.append(key)
dict.__setitem__(self, key, value)
elif isinstance(value, collections.Mapping) and not unrepr:
# First create the new depth level,
# then create the section
if key not in self:
self.sections.append(key)
new_depth = self.depth + 1
dict.__setitem__(
self,
key,
Section(
self,
new_depth,
self.main,
indict=value,
name=key))
else:
if key not in self:
self.scalars.append(key)
if not self.main.stringify:
if isinstance(value, str):
pass
elif isinstance(value, (list, tuple)):
for entry in value:
if not isinstance(entry, str):
raise TypeError('Value is not a string "%s".' % entry)
else:
raise TypeError('Value is not a string "%s".' % value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Remove items from the sequence when deleting."""
        dict.__delitem__(self, key)
if key in self.scalars:
self.scalars.remove(key)
else:
self.sections.remove(key)
del self.comments[key]
del self.inline_comments[key]
def get(self, key, default=None):
"""A version of ``get`` that doesn't bypass string interpolation."""
try:
return self[key]
except KeyError:
return default
def update(self, indict):
"""
A version of update that uses our ``__setitem__``.
"""
for entry in indict:
self[entry] = indict[entry]
def pop(self, key, default=MISSING):
"""
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised'
"""
try:
val = self[key]
except KeyError:
if default is MISSING:
raise
val = default
else:
del self[key]
return val
def popitem(self):
"""Pops the first (key,val)"""
sequence = (self.scalars + self.sections)
if not sequence:
raise KeyError(": 'popitem(): dictionary is empty'")
key = sequence[0]
val = self[key]
del self[key]
return key, val
def clear(self):
"""
A version of clear that also affects scalars/sections
Also clears comments and configspec.
Leaves other attributes alone :
depth/main/parent are not affected
"""
dict.clear(self)
self.scalars = []
self.sections = []
self.comments = {}
self.inline_comments = {}
self.configspec = None
self.defaults = []
self.extra_values = []
def setdefault(self, key, default=None):
"""A version of setdefault that sets sequence if appropriate."""
try:
return self[key]
except KeyError:
self[key] = default
return self[key]
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return list(zip((self.scalars + self.sections), list(self.values())))
def keys(self):
"""D.keys() -> list of D's keys"""
return (self.scalars + self.sections)
def values(self):
"""D.values() -> list of D's values"""
return [self[key] for key in (self.scalars + self.sections)]
def iteritems(self):
"""D.iteritems() -> an iterator over the (key, value) items of D"""
return iter(list(self.items()))
def iterkeys(self):
"""D.iterkeys() -> an iterator over the keys of D"""
return iter((self.scalars + self.sections))
__iter__ = iterkeys
def itervalues(self):
"""D.itervalues() -> an iterator over the values of D"""
return iter(list(self.values()))
def __repr__(self):
"""x.__repr__() <==> repr(x)"""
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])
__str__ = __repr__
__str__.__doc__ = "x.__str__() <==> str(x)"
# Extra methods - not in a normal dictionary
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict()
>>> n == a
1
>>> n is a
0
"""
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict
def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
"""
for key, val in list(indict.items()):
if (key in self and isinstance(self[key], collections.Mapping) and
isinstance(val, collections.Mapping)):
self[key].merge(val)
else:
self[key] = val
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
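        A minimal usage sketch (the keys here are illustrative and are not part
        of the original doctests):
        >>> c = ConfigObj()
        >>> c['key1'] = 'value'
        >>> c.rename('key1', 'key2')
        >>> c
        ConfigObj({'key2': 'value'})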
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
def walk(self, function, raise_errors=True,
call_on_sections=False, **keywargs):
"""
Walk every member and call a function on the keyword and value.
Return a dictionary of the return values
        If the function raises an exception, raise the error
        unless ``raise_errors=False``, in which case set the return value to
        ``False``.
        Any unrecognised keyword arguments you pass to walk will be passed on
to the function you pass in.
Note: if ``call_on_sections`` is ``True`` then - on encountering a
subsection, *first* the function is called for the *whole* subsection,
        and then recurses into its members. This means your function must be
able to handle strings, dictionaries and lists. This allows you
to change the key of subsections as well as for ordinary members. The
return value when called on the whole subsection has to be discarded.
See the encode and decode methods for examples, including functions.
.. admonition:: caution
You can use ``walk`` to transform the names of members of a section
but you mustn't add or delete members.
>>> config = '''[XXXXsection]
... XXXXkey = XXXXvalue'''.splitlines()
>>> cfg = ConfigObj(config)
>>> cfg
ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
>>> def transform(section, key):
... val = section[key]
... newkey = key.replace('XXXX', 'CLIENT1')
... section.rename(key, newkey)
... if isinstance(val, (tuple, list, dict)):
... pass
... else:
... val = val.replace('XXXX', 'CLIENT1')
... section[newkey] = val
>>> cfg.walk(transform, call_on_sections=True)
{'CLIENT1section': {'CLIENT1key': None}}
>>> cfg
ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
"""
out = {}
# scalars first
for i in range(len(self.scalars)):
entry = self.scalars[i]
try:
val = function(self, entry, **keywargs)
# bound again in case name has changed
entry = self.scalars[i]
out[entry] = val
except Exception:
if raise_errors:
raise
else:
entry = self.scalars[i]
out[entry] = False
# then sections
for i in range(len(self.sections)):
entry = self.sections[i]
if call_on_sections:
try:
function(self, entry, **keywargs)
except Exception:
if raise_errors:
raise
else:
entry = self.sections[i]
out[entry] = False
# bound again in case name has changed
entry = self.sections[i]
# previous result is discarded
out[entry] = self[entry].walk(
function,
raise_errors=raise_errors,
call_on_sections=call_on_sections,
**keywargs)
return out
def as_bool(self, key):
"""
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
"""
val = self[key]
if val == True:
return True
elif val == False:
return False
else:
try:
if not isinstance(val, str):
# TODO: Why do we raise a KeyError here?
raise KeyError()
else:
return self.main._bools[val.lower()]
except KeyError:
raise ValueError('Value "%s" is neither True nor False' % val)
def as_int(self, key):
"""
A convenience method which coerces the specified value to an integer.
If the value is an invalid literal for ``int``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_int('a')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'fish'
>>> a['b'] = '1'
>>> a.as_int('b')
1
>>> a['b'] = '3.2'
>>> a.as_int('b')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '3.2'
"""
return int(self[key])
def as_float(self, key):
"""
A convenience method which coerces the specified value to a float.
If the value is an invalid literal for ``float``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): fish
>>> a['b'] = '1'
>>> a.as_float('b')
1.0
>>> a['b'] = '3.2'
>>> a.as_float('b') #doctest: +ELLIPSIS
3.2...
"""
return float(self[key])
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
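    # A hedged usage sketch for ``restore_default``/``restore_defaults``; it
    # assumes the optional ``validate`` module and an illustrative configspec,
    # and is not part of the original API documentation:
    #
    #     from validate import Validator
    #     cfg = ConfigObj(['port = 21'],
    #                     configspec=['port = integer(default=8080)'])
    #     cfg.validate(Validator())       # True; 'port' becomes 21
    #     cfg.restore_default('port')     # returns 8080 and marks it as default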
class ConfigObj(Section):
"""An object to read, create, and write config files."""
_keyword = re.compile(r'''^ # line start
(\s*) # indentation
( # keyword
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"=].*?) # no quotes
)
\s*=\s* # divider
(.*) # value (including list values and comments)
$ # line end
''',
re.VERBOSE)
_sectionmarker = re.compile(r'''^
(\s*) # 1: indentation
((?:\[\s*)+) # 2: section marker open
( # 3: section name open
(?:"\s*\S.*?\s*")| # at least one non-space with double quotes
(?:'\s*\S.*?\s*')| # at least one non-space with single quotes
(?:[^'"\s].*?) # at least one non-space unquoted
) # section name close
((?:\s*\])+) # 4: section marker close
\s*(\#.*)? # 5: optional comment
$''',
re.VERBOSE)
# this regexp pulls list values out as a single string
# or single values and comments
# FIXME: this regex adds a '' to the end of comma terminated lists
# workaround in ``_handle_value``
_valueexp = re.compile(r'''^
(?:
(?:
(
(?:
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#][^,\#]*?) # unquoted
)
\s*,\s* # comma
)* # match all list items ending in a comma (if any)
)
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#\s][^,]*?)| # unquoted
(?:(?<!,)) # Empty value
)? # last item in a list - or string value
)|
(,) # alternatively a single comma - empty list
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# use findall to get the members of a list value
_listvalueexp = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#]?.*?) # unquoted
)
\s*,\s* # comma
''',
re.VERBOSE)
# this regexp is used for the value
# when lists are switched off
_nolistvalue = re.compile(r'''^
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"\#].*?)| # unquoted
(?:) # Empty value
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# regexes for finding triple quoted values on one line
_single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
_single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
_multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
_multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
_triple_quote = {
"'''": (_single_line_single, _multi_line_single),
'"""': (_single_line_double, _multi_line_double),
}
# Used by the ``istrue`` Section method
_bools = {
'yes': True, 'no': False,
'on': True, 'off': False,
'1': True, '0': False,
'true': True, 'false': False,
}
def __init__(self, infile=None, options=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False):
"""
Parse a config file or create a config file object.
``ConfigObj(infile=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False)``
"""
self._inspec = _inspec
# init the superclass
Section.__init__(self, self, 0, self)
infile = infile or []
_options = {'configspec': configspec,
'encoding': encoding, 'interpolation': interpolation,
'raise_errors': raise_errors, 'list_values': list_values,
'create_empty': create_empty, 'file_error': file_error,
'stringify': stringify, 'indent_type': indent_type,
'default_encoding': default_encoding, 'unrepr': unrepr,
'write_empty_values': write_empty_values}
if options is None:
options = _options
else:
import warnings
warnings.warn('Passing in an options dictionary to ConfigObj() is '
'deprecated. Use **options instead.',
DeprecationWarning)
# TODO: check the values too.
for entry in options:
if entry not in OPTION_DEFAULTS:
raise TypeError('Unrecognised option "%s".' % entry)
for entry, value in list(OPTION_DEFAULTS.items()):
if entry not in options:
options[entry] = value
keyword_value = _options[entry]
if value != keyword_value:
options[entry] = keyword_value
# XXXX this ignores an explicit list_values = True in combination
# with _inspec. The user should *never* do that anyway, but still...
if _inspec:
options['list_values'] = False
self._initialise(options)
configspec = options['configspec']
self._original_configspec = configspec
self._load(infile, configspec)
def _load(self, infile, configspec):
if isinstance(infile, str):
self.filename = infile
if os.path.isfile(infile):
with open(infile, 'rb') as h:
content = h.readlines() or []
elif self.file_error:
# raise an error if the file doesn't exist
raise IOError('Config file not found: "%s".' % self.filename)
else:
# file doesn't already exist
if self.create_empty:
# this is a good test that the filename specified
# isn't impossible - like on a non-existent device
with open(infile, 'w') as h:
h.write('')
content = []
elif isinstance(infile, (list, tuple)):
content = list(infile)
elif isinstance(infile, dict):
# initialise self
# the Section class handles creating subsections
if isinstance(infile, ConfigObj):
# get a copy of our ConfigObj
def set_section(in_section, this_section):
for entry in in_section.scalars:
this_section[entry] = in_section[entry]
for section in in_section.sections:
this_section[section] = {}
set_section(in_section[section], this_section[section])
set_section(infile, self)
else:
for entry in infile:
self[entry] = infile[entry]
del self._errors
if configspec is not None:
self._handle_configspec(configspec)
else:
self.configspec = None
return
elif getattr(infile, 'read', MISSING) is not MISSING:
# This supports file like objects
content = infile.read() or []
# needs splitting into lines - but needs doing *after* decoding
# in case it's not an 8 bit encoding
else:
raise TypeError('infile must be a filename, file like object, or list of lines.')
if content:
# don't do it for the empty ConfigObj
content = self._handle_bom(content)
# infile is now *always* a list
#
# Set the newlines attribute (first line ending it finds)
# and strip trailing '\n' or '\r' from lines
for line in content:
if (not line) or (line[-1] not in ('\r', '\n')):
continue
for end in ('\r\n', '\n', '\r'):
if line.endswith(end):
self.newlines = end
break
break
assert all(isinstance(line, str) for line in content), repr(content)
content = [line.rstrip('\r\n') for line in content]
self._parse(content)
# if we had any errors, now is the time to raise them
if self._errors:
info = "at line %s." % self._errors[0].line_number
if len(self._errors) > 1:
msg = "Parsing failed with several errors.\nFirst error %s" % info
error = ConfigObjError(msg)
else:
error = self._errors[0]
# set the errors attribute; it's a list of tuples:
# (error_type, message, line_number)
error.errors = self._errors
# set the config attribute
error.config = self
raise error
# delete private attributes
del self._errors
if configspec is None:
self.configspec = None
else:
self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = None
if self._inspec:
self.list_values = False
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return ('%s({%s})' % (self.__class__.__name__,
', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])))
def _handle_bom(self, infile):
"""
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
"""
if ((self.encoding is not None) and
(self.encoding.lower() not in BOM_LIST)):
# No need to check for a BOM
# the encoding specified doesn't have one
# just decode
return self._decode(infile, self.encoding)
if isinstance(infile, (list, tuple)):
line = infile[0]
else:
line = infile
if isinstance(line, str):
# it's already decoded and there's no need to do anything
# else, just use the _decode utility method to handle
# listifying appropriately
return self._decode(infile, self.encoding)
if self.encoding is not None:
# encoding explicitly supplied
# And it could have an associated BOM
# TODO: if encoding is just UTF16 - we ought to check for both
# TODO: big endian and little endian versions.
enc = BOM_LIST[self.encoding.lower()]
if enc == 'utf_16':
# For UTF16 we try big endian and little endian
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not final_encoding:
# skip UTF8
continue
if infile.startswith(BOM):
### BOM discovered
##self.BOM = True
# Don't need to remove BOM
return self._decode(infile, encoding)
# If we get this far, will *probably* raise a DecodeError
# As it doesn't appear to start with a BOM
return self._decode(infile, self.encoding)
# Must be UTF8
BOM = BOM_SET[enc]
if not line.startswith(BOM):
return self._decode(infile, self.encoding)
newline = line[len(BOM):]
# BOM removed
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
self.BOM = True
return self._decode(infile, self.encoding)
# No encoding specified - so we need to check for UTF8/UTF16
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not isinstance(line, bytes) or not line.startswith(BOM):
# didn't specify a BOM, or it's not a bytestring
continue
else:
# BOM discovered
self.encoding = final_encoding
if not final_encoding:
self.BOM = True
# UTF8
# remove BOM
newline = line[len(BOM):]
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
# UTF-8
if isinstance(infile, str):
return infile.splitlines(True)
elif isinstance(infile, bytes):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
# UTF16 - have to decode
return self._decode(infile, encoding)
# No BOM discovered and no encoding specified, default to UTF-8
if isinstance(infile, bytes):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if isinstance(aString, bytes) and self.encoding:
return aString.decode(self.encoding)
else:
return aString
def _decode(self, infile, encoding):
"""
        Decode infile to unicode, using the specified encoding.
        If it is a string, it also needs converting to a list.
"""
if isinstance(infile, str):
return infile.splitlines(True)
if isinstance(infile, bytes):
# NOTE: Could raise a ``UnicodeDecodeError``
if encoding:
return infile.decode(encoding).splitlines(True)
else:
return infile.splitlines(True)
if encoding:
for i, line in enumerate(infile):
if isinstance(line, bytes):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
infile[i] = line.decode(encoding)
return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if isinstance(line, bytes) and self.default_encoding:
return line.decode(self.default_encoding)
else:
return line
# TODO: this may need to be modified
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, str):
            # intentionally 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
return str(value)
else:
return value
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
                    # the new section is a child of the current section
parent = this_section
else:
self._handle_error("Section too nested",
NestingError, infile, cur_index)
continue
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
self._handle_error(
'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line),
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in multiline value',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing multiline value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values
def _match_depth(self, sect, depth):
"""
        Given a section and a depth level, walk back through the section's
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
        The error will have occurred at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = '{0} at line {1}.'.format(text, cur_index)
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
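        For example (values are illustrative, assuming default options)::
            _quote('plain')     # -> plain        (no quoting needed)
            _quote('a, b')      # -> "a, b"       (a comma forces quoting)
            # a value containing both quote characters gets triple quoted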
"""
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, str):
if self.stringify:
                # intentionally 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
def _handle_value(self, value):
"""
Given a value string, unquote, remove comment,
handle lists. (including empty and single member lists)
"""
if self._inspec:
# Parsing a configspec so don't handle comments
return (value, '')
# do we look for lists in values ?
if not self.list_values:
mat = self._nolistvalue.match(value)
if mat is None:
raise SyntaxError()
# NOTE: we don't unquote here
return mat.groups()
#
mat = self._valueexp.match(value)
if mat is None:
# the value is badly constructed, probably badly quoted,
# or an invalid list
raise SyntaxError()
(list_values, single, empty_list, comment) = mat.groups()
if (list_values == '') and (single is None):
# change this if you want to accept empty values
raise SyntaxError()
        # NOTE: there is no error handling from here if the regex
# is wrong: then incorrect values will slip through
if empty_list is not None:
# the single comma - meaning an empty list
return ([], comment)
if single is not None:
# handle empty values
if list_values and not single:
# FIXME: the '' is a workaround because our regex now matches
# '' at the end of a list if it has a trailing comma
single = None
else:
single = single or '""'
single = self._unquote(single)
if list_values == '':
# not a list value
return (single, comment)
the_list = self._listvalueexp.findall(list_values)
the_list = [self._unquote(val) for val in the_list]
if single is not None:
the_list += [single]
return (the_list, comment)
def _multiline(self, value, infile, cur_index, maxline):
"""Extract the value, where we are in a multiline situation."""
quot = value[:3]
newvalue = value[3:]
single_line = self._triple_quote[quot][0]
multi_line = self._triple_quote[quot][1]
mat = single_line.match(value)
if mat is not None:
retval = list(mat.groups())
retval.append(cur_index)
return retval
elif newvalue.find(quot) != -1:
# somehow the triple quote is missing
raise SyntaxError()
#
while cur_index < maxline:
cur_index += 1
newvalue += '\n'
line = infile[cur_index]
if line.find(quot) == -1:
newvalue += line
else:
# end of multiline, process it
break
else:
# we've got to the end of the config, oops...
raise SyntaxError()
mat = multi_line.match(line)
if mat is None:
# a badly formed line
raise SyntaxError()
(value, comment) = mat.groups()
return (newvalue + value, comment, cur_index)
def _handle_configspec(self, configspec):
"""Parse the configspec."""
# FIXME: Should we check that the configspec was created with the
# correct settings ? (i.e. ``list_values=False``)
if not isinstance(configspec, ConfigObj):
try:
configspec = ConfigObj(configspec,
raise_errors=True,
file_error=True,
_inspec=True)
except ConfigObjError as e:
# FIXME: Should these errors have a reference
# to the already parsed ConfigObj ?
raise ConfigspecError('Parsing configspec failed: %s' % e)
except IOError as e:
raise IOError('Reading configspec failed: %s' % e)
self.configspec = configspec
def _set_configspec(self, section, copy):
"""
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
"""
configspec = section.configspec
many = configspec.get('__many__')
if isinstance(many, dict):
for entry in section.sections:
if entry not in configspec:
section[entry].configspec = many
for entry in configspec.sections:
if entry == '__many__':
continue
if entry not in section:
section[entry] = {}
section[entry]._created = True
if copy:
# copy comments
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
# Could be a scalar when we expect a section
if isinstance(section[entry], Section):
section[entry].configspec = configspec[entry]
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
        # NOTE: the calls to self._quote here handle non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
def write(self, outfile=None, section=None):
"""
Write the current ConfigObj as a file
tekNico: FIXME: use StringIO instead of real files
>>> filename = a.filename
>>> a.filename = 'test.ini'
>>> a.write()
>>> a.filename = filename
>>> a == ConfigObj('test.ini', raise_errors=True)
1
>>> import os
>>> os.remove('test.ini')
"""
if self.indent_type is None:
# this can be true if initialised from a dictionary
self.indent_type = DEFAULT_INDENT_TYPE
out = []
cs = self._a_to_u('#')
csp = self._a_to_u('# ')
if section is None:
int_val = self.interpolation
self.interpolation = False
section = self
for line in self.initial_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
indent_string = self.indent_type * section.depth
for entry in (section.scalars + section.sections):
if entry in section.defaults:
# don't write out default values
continue
for comment_line in section.comments[entry]:
comment_line = self._decode_element(comment_line.lstrip())
if comment_line and not comment_line.startswith(cs):
comment_line = csp + comment_line
out.append(indent_string + comment_line)
this_entry = section[entry]
comment = self._handle_comment(section.inline_comments[entry])
if isinstance(this_entry, Section):
# a section
out.append(self._write_marker(
indent_string,
this_entry.depth,
entry,
comment))
out.extend(self.write(section=this_entry))
else:
out.append(self._write_line(
indent_string,
entry,
this_entry,
comment))
if section is self:
for line in self.final_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
self.interpolation = int_val
if section is not self:
return out
if (self.filename is None) and (outfile is None):
# output a list of lines
# might need to encode
# NOTE: This will *screw* UTF16, each line will start with the BOM
if self.encoding:
out = [l.encode(self.encoding) for l in out]
if (self.BOM and ((self.encoding is None) or
(BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
# Add the UTF8 BOM
if not out:
out.append('')
out[0] = BOM_UTF8 + out[0]
return out
# Turn the list to a string, joined with correct newlines
newline = self.newlines or os.linesep
if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
and sys.platform == 'win32' and newline == '\r\n'):
# Windows specific hack to avoid writing '\r\r\n'
newline = '\n'
output = self._a_to_u(newline).join(out)
if not output.endswith(newline):
output += newline
if isinstance(output, bytes):
output_bytes = output
else:
output_bytes = output.encode(self.encoding or
self.default_encoding or
'ascii')
if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
# Add the UTF8 BOM
output_bytes = BOM_UTF8 + output_bytes
if outfile is not None:
outfile.write(output_bytes)
else:
with open(self.filename, 'wb') as h:
h.write(output_bytes)
def validate(self, validator, preserve_errors=False, copy=False,
section=None):
"""
Test the ConfigObj against a configspec.
It uses the ``validator`` object from *validate.py*.
To run ``validate`` on the current ConfigObj, call: ::
test = config.validate(validator)
(Normally having previously passed in the configspec when the ConfigObj
was created - you can dynamically assign a dictionary of checks to the
``configspec`` attribute of a section though).
It returns ``True`` if everything passes, or a dictionary of
pass/fails (True/False). If every member of a subsection passes, it
will just have the value ``True``. (It also returns ``False`` if all
members fail).
In addition, it converts the values from strings to their native
types if their checks pass (and ``stringify`` is set).
If ``preserve_errors`` is ``True`` (``False`` is default) then instead
        of marking a fail with a ``False``, it will preserve the actual
exception object. This can contain info about the reason for failure.
For example the ``VdtValueTooSmallError`` indicates that the value
supplied was too small. If a value (or section) is missing it will
still be marked as ``False``.
You must have the validate module to use ``preserve_errors=True``.
You can then use the ``flatten_errors`` function to turn your nested
results dictionary into a flattened list of failures - useful for
displaying meaningful error messages.
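        A hedged end-to-end sketch (the spec and values are illustrative and
        assume the ``validate`` module is importable)::
            from validate import Validator
            spec = ['port = integer(min=1, max=65535)', 'host = string']
            config = ConfigObj(['port = 8080', 'host = localhost'],
                               configspec=spec)
            config.validate(Validator())   # True; 'port' is converted to 8080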
"""
if section is None:
if self.configspec is None:
raise ValueError('No configspec supplied.')
if preserve_errors:
# We do this once to remove a top level dependency on the validate module
# Which makes importing configobj faster
from .validate import VdtMissingValue
self._vdtMissingValue = VdtMissingValue
section = self
if copy:
section.initial_comment = section.configspec.initial_comment
section.final_comment = section.configspec.final_comment
section.encoding = section.configspec.encoding
section.BOM = section.configspec.BOM
section.newlines = section.configspec.newlines
section.indent_type = section.configspec.indent_type
#
# section.default_values.clear() #??
configspec = section.configspec
self._set_configspec(section, copy)
def validate_entry(entry, spec, val, missing, ret_true, ret_false):
section.default_values.pop(entry, None)
try:
section.default_values[entry] = validator.get_default_value(configspec[entry])
except (KeyError, AttributeError, validator.baseErrorClass):
# No default, bad default or validator has no 'get_default_value'
# (e.g. SimpleVal)
pass
try:
check = validator.check(spec,
val,
missing=missing
)
except validator.baseErrorClass as e:
if not preserve_errors or isinstance(e, self._vdtMissingValue):
out[entry] = False
else:
# preserve the error
out[entry] = e
ret_false = False
ret_true = False
else:
ret_false = False
out[entry] = True
if self.stringify or missing:
# if we are doing type conversion
# or the value is a supplied default
if not self.stringify:
if isinstance(check, (list, tuple)):
# preserve lists
check = [self._str(item) for item in check]
elif missing and check is None:
# convert the None from a default to a ''
check = ''
else:
check = self._str(check)
if (check != val) or missing:
section[entry] = check
if not copy and missing and entry not in section.defaults:
section.defaults.append(entry)
return ret_true, ret_false
#
out = {}
ret_true = True
ret_false = True
unvalidated = [k for k in section.scalars if k not in configspec]
incorrect_sections = [k for k in configspec.sections if k in section.scalars]
incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
for entry in configspec.scalars:
if entry in ('__many__', '___many___'):
# reserved names
continue
if (not entry in section.scalars) or (entry in section.defaults):
# missing entries
# or entries from defaults
missing = True
val = None
if copy and entry not in section.scalars:
# copy comments
section.comments[entry] = (
configspec.comments.get(entry, []))
section.inline_comments[entry] = (
configspec.inline_comments.get(entry, ''))
#
else:
missing = False
val = section[entry]
ret_true, ret_false = validate_entry(entry, configspec[entry], val,
missing, ret_true, ret_false)
many = None
if '__many__' in configspec.scalars:
many = configspec['__many__']
elif '___many___' in configspec.scalars:
many = configspec['___many___']
if many is not None:
for entry in unvalidated:
val = section[entry]
ret_true, ret_false = validate_entry(entry, many, val, False,
ret_true, ret_false)
unvalidated = []
for entry in incorrect_scalars:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Value %r was provided as a section' % entry
out[entry] = validator.baseErrorClass(msg)
for entry in incorrect_sections:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Section %r was provided as a single value' % entry
out[entry] = validator.baseErrorClass(msg)
# Missing sections will have been created as empty ones when the
# configspec was read.
for entry in section.sections:
# FIXME: this means DEFAULT is not copied in copy mode
if section is self and entry == 'DEFAULT':
continue
if section[entry].configspec is None:
unvalidated.append(entry)
continue
if copy:
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
out[entry] = check
if check == False:
ret_true = False
elif check == True:
ret_false = False
else:
ret_true = False
section.extra_values = unvalidated
if preserve_errors and not section._created:
# If the section wasn't created (i.e. it wasn't missing)
# then we can't return False, we need to preserve errors
ret_false = False
#
if ret_false and preserve_errors and out:
# If we are preserving errors, but all
# the failures are from missing sections / values
# then we can return False. Otherwise there is a
# real failure that we need to preserve.
ret_false = not any(out.values())
if ret_true:
return True
elif ret_false:
return False
return out
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
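        A minimal sketch (the filename is illustrative)::
            config = ConfigObj('app.ini')
            config['option'] = 'changed in memory'
            config.reload()        # discards the edit and re-reads app.ini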
"""
if not isinstance(self.filename, str):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec)
class SimpleVal(object):
"""
A simple validator.
Can be used to check that all members expected are present.
To use it, provide a configspec with all your members in (the value given
will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
method of your ``ConfigObj``. ``validate`` will return ``True`` if all
members are present, or a dictionary with True/False meaning
present/missing. (Whole missing sections will be replaced with ``False``)
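    A hedged usage sketch (the spec is illustrative)::
        spec = ['name = ', 'port = ']
        config = ConfigObj(['name = demo'], configspec=spec)
        config.validate(SimpleVal())   # {'name': True, 'port': False}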
"""
def __init__(self):
self.baseErrorClass = ConfigObjError
def check(self, check, member, missing=False):
"""A dummy check method, always returns the value unchanged."""
if missing:
raise self.baseErrorClass()
return member
def flatten_errors(cfg, res, levels=None, results=None):
"""
An example function that will turn a nested dictionary of results
(as returned by ``ConfigObj.validate``) into a flat list.
``cfg`` is the ConfigObj instance being checked, ``res`` is the results
dictionary returned by ``validate``.
(This is a recursive function, so you shouldn't use the ``levels`` or
``results`` arguments - they are used by the function.)
Returns a list of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
If ``validate`` was called with ``preserve_errors=False`` (the default)
then ``result`` will always be ``False``.
*list of sections* is a flattened list of sections that the key was found
in.
If the section was missing (or a section was expected and a scalar provided
- or vice-versa) then key will be ``None``.
If the value (or section) was missing then ``result`` will be ``False``.
If ``validate`` was called with ``preserve_errors=True`` and a value
was present, but failed the check, then ``result`` will be the exception
object returned. You can use this as a string that describes the failure.
For example *The value "3" is of the wrong type*.
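    A hedged reporting sketch (names are illustrative)::
        for sections, key, result in flatten_errors(config, results):
            section_path = ', '.join(sections) or 'top level'
            if key is None:
                print('missing section in %s' % section_path)
            else:
                print('%s in %s failed: %s' % (key, section_path, result))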
"""
if levels is None:
# first time called
levels = []
results = []
if res == True:
return sorted(results)
if res == False or isinstance(res, Exception):
results.append((levels[:], None, res))
if levels:
levels.pop()
return sorted(results)
for (key, val) in list(res.items()):
if val == True:
continue
if isinstance(cfg.get(key), collections.Mapping):
# Go down one level
levels.append(key)
flatten_errors(cfg[key], val, levels, results)
continue
results.append((levels[:], key, val))
#
# Go up one level
if levels:
levels.pop()
#
return sorted(results)
def get_extra_values(conf, _prepend=()):
"""
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
The tuples contain two values, a tuple representing the section the value
is in and the name of the extra values. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
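    A hedged sketch of the return format (names are illustrative)::
        get_extra_values(conf)
        # e.g. [((), 'surplus_key'), (('foo',), 'extra'), (('foo', 'bar'), 'junk')]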
"""
out = []
out.extend([(_prepend, name) for name in conf.extra_values])
for name in conf.sections:
if name not in conf.extra_values:
out.extend(get_extra_values(conf[name], _prepend + (name,)))
return out
"""*A programming language is a medium of expression.* - Paul Graham"""
| bsd-3-clause | -3,679,440,947,610,029,600 | 34.399273 | 114 | 0.527565 | false |
gjost/django-linkpile | runtests.py | 1 | 1091 | import sys
try:
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="linkpile.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"linkpile",
],
SITE_ID=1,
NOSE_ARGS=['-s'],
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
from django_nose import NoseTestSuiteRunner
except ImportError:
raise ImportError("To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
test_runner = NoseTestSuiteRunner(verbosity=1)
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(failures)
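# Typical invocation (test labels are illustrative; requires the packages from
# requirements-test.txt): ``python runtests.py`` or ``python runtests.py tests.test_foo``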
if __name__ == '__main__':
run_tests(*sys.argv[1:]) | bsd-3-clause | 8,139,445,616,342,015,000 | 20 | 85 | 0.549954 | false |
procool/mygw | web/apps/cabinet/views.py | 1 | 2966 | import logging
import datetime
from sqlalchemy import func, and_, or_, not_
from flask import url_for, session
from misc.mixins import myTemplateView, JSONView
from utils.arp_list import get_mac_by_ip
from models.all_models import InetEther, ARPCache
from models.session import session
from utils.server.http_client import HTTPClient
from libs.pfctl import PFCtl
class checkIPMixin(object):
def check_for_ip(self):
self.request.remote_ether = session.query(ARPCache).filter(ARPCache.ip==self.request.remote_addr).first()
if self.request.remote_ether is None:
logging.error('IP: %s not found in cached arp list!' % self.request.remote_addr)
self.request.remote_ether = get_mac_by_ip(self.request.remote_addr)
else:
self.request.remote_ether = self.request.remote_ether.mac
if self.request.remote_ether is None or self.request.remote_addr is None:
return None
addr = session.query(InetEther).filter(InetEther.mac==self.request.remote_ether).first()
if addr is None:
logging.error('XXX4.1')
addr = InetEther()
addr.mac = self.request.remote_ether
if addr.ip != self.request.remote_addr or not addr.is_active:
logging.error('XXX4.2')
addr.ip = self.request.remote_addr
addr.is_active = True
addr.lastupdate = func.now()
session.add(addr)
addrs = session.query(InetEther).filter(not_(InetEther.mac==self.request.remote_ether))
addrs = addrs.filter(InetEther.ip==self.request.remote_addr)
addrs.update({"is_active": False})
return addr
class cabinetView(checkIPMixin, myTemplateView):
template='cabinet/cabinet-ajax.tpl'
def get_context_data(self, **kwargs):
addr = self.check_for_ip()
context = super(cabinetView, self).get_context_data(**kwargs)
context['addr_obj'] = addr
if addr is None:
context['access_type'] = 'UNDEFINED'
elif addr.access_type == 'tor':
context['access_type'] = 'TOR'
else:
context['access_type'] = 'DIRECT'
return context
class setIPView(checkIPMixin, JSONView):
__ctlsrv = HTTPClient(port=6999)
def get_context_data(self, **kwargs):
context = super(setIPView, self).get_context_data(**kwargs)
addr = self.check_for_ip()
if addr is None:
return context
addr.access_type = self.__type
session.add(addr)
session.commit()
r = self.__ctlsrv.call_handler('ip/%s/access' % self.request.remote_addr)
context['result'] = r
return context
def dispatch(self, request, access_type, *args, **kwargs):
if access_type in PFCtl.ip_proxy_types:
self.__type = access_type
else:
self.__type = None
return super(setIPView, self).dispatch(self, request, *args, **kwargs)
| bsd-2-clause | -5,029,251,846,019,144,000 | 34.73494 | 113 | 0.633176 | false |
rdegges/django-twilio | setup.py | 1 | 3277 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from os.path import abspath, dirname, join, normpath
from setuptools import find_packages, setup
import sys
INSTALL_PYTHON_REQUIRES = []
# We are intending to keep up to date with the supported Django versions.
# For the official support, please visit:
# https://docs.djangoproject.com/en/3.0/faq/install/#what-python-version-can-i-use-with-django and you may change the version in the URL to suit your needs, and we will try to update that here too as we upgrade with django.
if sys.version_info[1] == 5:
# py3.5 can run 1.11 < 2.2
django_python_version_install = 'Django>=2.2,<3.0',
INSTALL_PYTHON_REQUIRES.append(django_python_version_install)
elif sys.version_info[1] == 6:
# py3.6 can run 1.11 < 3.1 (likely will be <4.0)
django_python_version_install = 'Django>=2.2,<3.2',
INSTALL_PYTHON_REQUIRES.append(django_python_version_install)
elif sys.version_info[1] == 7:
# py3.7 is 1.11.17 < 3.1 (likely will be <4.0)
django_python_version_install = 'Django>=2.2,<3.2'
INSTALL_PYTHON_REQUIRES.append(django_python_version_install)
elif sys.version_info[1] == 8:
# py3.8 is 2.2.8 < 3.1 (likely will be <4.0)
django_python_version_install = 'Django>=2.2.8,<3.2'
INSTALL_PYTHON_REQUIRES.append(django_python_version_install)
setup(
# Basic package information:
name='django-twilio',
version='0.13.1.post0',
packages=find_packages(),
# Packaging options:
zip_safe=False,
include_package_data=True,
# Package dependencies:
install_requires=[
'setuptools>=36.2',
'twilio>=6.3.0,<7',
'django-phonenumber-field>=0.6',
'phonenumbers>=8.10.22',
] + INSTALL_PYTHON_REQUIRES,
# Metadata for PyPI:
author='Randall Degges',
author_email='[email protected]',
maintainer="Jason Held",
maintainer_email="[email protected]",
license='UNLICENSE',
url='https://github.com/rdegges/django-twilio',
keywords='twilio telephony call phone voip sms django django-twilio',
description='Build Twilio functionality into your Django apps.',
long_description=open(
normpath(join(dirname(abspath(__file__)), 'README.rst'))
).read(),
project_urls={
"Documentation": "https://django-twilio.readthedocs.io/en/latest/",
"Code": "https://github.com/rdegges/django-twilio",
"Tracker": "https://github.com/rdegges/django-twilio/issues",
},
classifiers=[
'Framework :: Django',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Intended Audience :: Developers',
'License :: Public Domain',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
]
)
| unlicense | 6,593,338,256,977,965,000 | 37.552941 | 223 | 0.646628 | false |
sclc/NAEF | exp_scripts/worker_exp_160531.py | 1 | 13711 | """
Experiment Diary 2016-05-31
"""
import sys
import math
import matplotlib.pyplot as plt
from scipy import io
import numpy as np
from scipy.sparse.linalg import *
sys.path.append("../src/")
from worker import Worker
from native_conjugate_gradient import NativeConjugateGradient
from native_conjugate_gradient import NativeBlockConjugateGradient
from gerschgorin_circle_theorem import GerschgorinCircleTheoremEigenvalueEstimator
from chebyshev_polynomial import ChebyshevPolynomial
from chebyshev_basis_cacg import CBCG
from legendre_basis_cacg import LBCG
from legendre_basis_cacg import BLBCG
from chebyshev_basis_cacg import BCBCG
from presenter import Presenter
from power_iteration import PowerIteration
class WorkerIterativeLinearSystemSolverCG_Exp_160531(Worker):
""" Description: Experiment A
Numerical Method: Naive Conjugate Gradient
tol:
max_iteration:
matrix:
Reference:
1.
"""
def __init__(self, mat_path):
""" """
#print ("WorkerIterativeLinearSystemSolver works good")
Worker.__init__(self)
self._hist_list = []
if mat_path == "":
""" Need to generatre matrix """
print("calling self._matrix_generation")
#self._mat = self._matrix_generation()
else:
self._mat_coo = io.mmread(mat_path)
self._mat = self._mat_coo.tocsr()
self._mat_info = io.mminfo(mat_path)
print("Done reading matrix {}, Row:{}, Col:{}".format( mat_path, self._mat.shape[0], self._mat.shape[1]))
print("mminfo:{}".format(self._mat_info))
if self._mat.getformat() == "csr":
print("Yeah, it is CSR")
def _matrix_generator(self):
""" generation of matrix """
print("_matrix_generator")
def _setup_testbed(self, block_size):
""" this can considered as a basic experiment input descripting """
self._SB = np.random.random( ( self._mat.shape[0],1) )
self._BB = np.random.random( ( self._mat.shape[0],block_size) )
#np.savetxt("/home/scl/tmp/rhs.csv",self._B, delimiter=",")
#self._B = np.ones( ( self._mat.shape[0],6) )
self._SX = np.ones ( (self._mat.shape[1],1) )
self._BX = np.ones ( (self._mat.shape[1],block_size) )
#self._X = np.zeros ( (self._mat.shape[1],1) )
def _setup_numerical_algorithm(self,tol, maxiter, step_val):
""" After a linear solver or other numerical methods loaded
we need to setup the basic prarm for the algorithm
"""
self._tol = tol
self._maxiter = maxiter
self._step_val = step_val
def conduct_experiments(self, block_size, tol, maxiter, step_val):
""" function to condution the experiment """
print("to conduct the experient")
self._setup_testbed(block_size)
self._setup_numerical_algorithm(tol,maxiter,step_val)
#print ("before:{}".format(np.inner(self._X[:,0], self._X[:,0])))
#self._bcbcg_exp()
#self._db_presenter_a()
#self._db_power_iteration()
#self._db_lbcg_exp()
#self._db_blbcg_exp()
#self. _numpy_lstsq_test()
#self._db_cbcg_lstsq()
#self._db_bcbcg_lstsq()
#self._lbcg_least_square_exp()
self._blbcg_least_square_exp()
print("Experiments done")
def _bcbcg_exp(self):
bcbcg_solver_obj = BCBCG()
step_val_a = 3
step_val_b = 5
self._final_X_a, self._final_R_a, self._residual_hist_a = \
bcbcg_solver_obj.bcbcg_solver(self._mat, self._B, self._X, step_val_a, self._tol, self._maxiter,0)
self._final_X_b, self._final_R_b, self._residual_hist_b = \
bcbcg_solver_obj.bcbcg_solver(self._mat, self._B, self._X, step_val_b, self._tol, self._maxiter,0)
def _db_presenter_a(self):
plot_worker = Presenter()
residual_list = [self._residual_hist_a]
residual_list.append(self._residual_hist_b)
legend_list = ["bcbcg_s3", "bcbcg_s5"]
color_list = ["r", "k"]
# latex style notation
#plot_worker.instant_plot_y_log10(residual_list, "crystm01 $x_1$")
#plot_worker.instant_plot_y_log10(residual_list, "crystm01", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
plot_worker.instant_plot_y(residual_list, "crystm01", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def legendre_poly_exp_a(self, order_lo, order_hi):
""" """
x= np.linspace(-1.1,1.1,41)
order_controller = np.zeros(order_hi+1)
y_list = []
plot_worker = Presenter()
legend_list = []
color_list = []
for order_idx in range(order_lo, order_hi+1):
order_controller[order_idx] = 1
legp = np.polynomial.legendre.Legendre( order_controller )
legcoef = np.polynomial.legendre.leg2poly(legp.coef )
poly = np.polynomial.Polynomial(legcoef)
y_list.append( poly(x) )
print(order_idx, " ", poly(x))
legend_list.append( "order_"+str(order_idx) )
color_list.append("k")
order_controller[order_idx] = 0
plot_worker.instant_plot_unified_x_axis(x, y_list, "Legendre Poly" , "x", "y", legend_list, color_list)
def _db_lbcg_exp (self):
""" """
lbcg_solver_obj = LBCG()
        self._final_x_a, self._final_r_a, self._residual_hist_a = \
             lbcg_solver_obj.lbcg_solver(self._mat, self._SB, self._SX, 8, self._tol, self._maxiter)
        self._final_x_b, self._final_r_b, self._residual_hist_b = \
             lbcg_solver_obj.lbcg_solver(self._mat, self._SB, self._SX, 16, self._tol, self._maxiter)
        cbcg_solver_obj = CBCG()
        self._final_x_c, self._final_r_c, self._residual_hist_c = \
             cbcg_solver_obj.cbcg_solver(self._mat, self._SB, self._SX, 16, self._tol, self._maxiter)
plot_worker = Presenter()
residual_list = [self._residual_hist_a, self._residual_hist_b, self._residual_hist_c]
legend_list = ["lbcg_s8","lbcg_s16", "cbcg_s16"]
color_list = ["r","k", "b"]
#plot_worker.instant_plot_y_log10(residual_list, "crystm01", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
plot_worker.instant_plot_y_log10(residual_list, "wathen100", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def _db_blbcg_exp(self):
""" """
lbcg_solver_obj = LBCG()
self._final_x_a, self._final_r_a, self._residual_hist_a = \
lbcg_solver_obj.lbcg_solver(self._mat, self._SB, self._SX, 8, self._tol, self._maxiter)
blbcg_solver_obj = BLBCG()
self._final_x_b, self._final_r_b, self._residual_hist_b = \
blbcg_solver_obj.blbcg_solver(self._mat, self._BB, self._BX, 8, self._tol, self._maxiter, 0)
bcbcg_solver_obj = BCBCG()
self._final_x_c, self._final_r_c, self._residual_hist_c = \
bcbcg_solver_obj.bcbcg_solver(self._mat, self._BB, self._BX, 8, self._tol, self._maxiter, 0)
plot_worker = Presenter()
residual_list = [self._residual_hist_a, self._residual_hist_b, self._residual_hist_c]
legend_list = ["lbcg_s8","blbcg_s8b10", "bcbcg_s8b10"]
color_list = ["r","k", "b"]
plot_worker.instant_plot_y_log10(residual_list, "bodyy6", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def _numpy_lstsq_test (self):
""" """
self._small_mat = np.random.random( ( 5,5) )
self._small_rhs = np.random.random( ( 5,3) )
self._lstsq_res = np.linalg.lstsq(self._small_mat, self._small_rhs)
print (self._small_mat)
print("")
print(self._small_rhs)
print("")
print(self._lstsq_res)
print("")
print(np.matmul(self._small_mat, self._lstsq_res[0]))
#print(type(self._small_mat), "", type(self._lstsq_res))
def _db_cbcg_lstsq (self):
cbcg_solver_obj = CBCG()
self._final_x_a, self._final_r_a, self._residual_hist_a = \
cbcg_solver_obj.cbcg_solver_least_square(self._mat, self._SB, self._SX, self._step_val, self._tol, self._maxiter)
self._final_x_b, self._final_r_b, self._residual_hist_b = \
cbcg_solver_obj.cbcg_solver_least_square(self._mat, self._SB, self._SX, self._step_val, self._tol, self._maxiter)
plot_worker = Presenter()
residual_list = [self._residual_hist_a, self._residual_hist_b]
legend_list = ["cbcg_s2_lstsq","blbcg_s2"]
color_list = ["r","k"]
plot_worker.instant_plot_y_log10(residual_list, "bodyy6", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def _db_bcbcg_lstsq (self):
""" """
bcbcg_solver_obj = BCBCG()
self._final_X_a, self._final_R_a, self._residual_hist_a = \
bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB, self._BX, self._step_val, self._tol, self._maxiter,0)
self._final_X_b, self._final_R_b, self._residual_hist_b = \
bcbcg_solver_obj.bcbcg_solver(self._mat, self._BB, self._BX, self._step_val, self._tol, self._maxiter,0)
plot_worker = Presenter()
residual_list = [self._residual_hist_a, self._residual_hist_b]
legend_list = ["bcbcg_s20b4_lstsq","bcbcg_s20b4"]
color_list = ["r","k"]
plot_worker.instant_plot_y_log10(residual_list, "crystm02", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def _lbcg_least_square_exp (self):
""" """
lbcg_solver_obj = LBCG()
self._final_x_a, self._final_r_a, self._residual_hist_a = \
lbcg_solver_obj.lbcg_solver_least_square(self._mat, self._SB, self._SX, 8, self._tol, self._maxiter)
self._final_x_b, self._final_r_b, self._residual_hist_b = \
lbcg_solver_obj.lbcg_solver_least_square(self._mat, self._SB, self._SX, 18, self._tol, self._maxiter)
cbcg_solver_obj = CBCG()
self._final_x_c, self._final_r_c, self._residual_hist_c = \
cbcg_solver_obj.cbcg_solver_least_square(self._mat, self._SB, self._SX, 8, self._tol, self._maxiter)
self._final_x_d, self._final_r_d, self._residual_hist_d = \
cbcg_solver_obj.cbcg_solver_least_square(self._mat, self._SB, self._SX, 18, self._tol, self._maxiter)
plot_worker = Presenter()
residual_list = [self._residual_hist_a, self._residual_hist_b, self._residual_hist_c, self._residual_hist_d ]
legend_list = ["lbcg_lstsq_s8","lbcg_lstsq_s18" ,"cbcg_lstsq_s8", "cbcg_lstsq_s18" ]
color_list = ["r","k", "b","y"]
#plot_worker.instant_plot_y_log10(residual_list, "crystm01", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
#plot_worker.instant_plot_y_log10(residual_list, "wathen100", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
plot_worker.instant_plot_y_log10(residual_list, "bodyy06", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def _blbcg_least_square_exp(self):
""" """
blbcg_solver_obj = BLBCG()
self._final_x_a, self._final_r_a, self._residual_hist_a = \
blbcg_solver_obj.blbcg_solver_least_square(self._mat, self._BB, self._BX, self._step_val, self._tol, self._maxiter, 0)
bcbcg_solver_obj = BCBCG()
self._final_x_b, self._final_r_b, self._residual_hist_b = \
bcbcg_solver_obj.bcbcg_solver_least_square(self._mat, self._BB, self._BX, self._step_val, self._tol, self._maxiter, 0)
plot_worker = Presenter()
residual_list = [self._residual_hist_a, self._residual_hist_b]
legend_list = ["blbcg_s64b4_lstsq","bcbcg_s64b4_lstsq"]
color_list = ["r","k"]
#plot_worker.instant_plot_y_log10(residual_list, "crystm01", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
plot_worker.instant_plot_y_log10(residual_list, "bodyy6", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def main ():
# main function for today's experiments
#bad
#mat_path = "/home/scl/MStore/vanbody/vanbody.mtx"
#mat_path = "/home/scl/MStore/olafu/olafu.mtx"
#mat_path = "/home/scl/MStore/raefsky4/raefsky4.mtx"
#mat_path = "/home/scl/MStore/smt/smt.mtx"
#mat_path = "/home/scl/MStore/bcsstk36/bcsstk36.mtx"
#mat_path = "/home/scl/MStore/pdb1HYS/pdb1HYS.mtx"
#mat_path = "/home/scl/MStore/ship_001/ship_001.mtx"
# not so good
#mat_path = "/home/scl/MStore/Dubcova1/Dubcova1.mtx"
#mat_path = "/home/scl/MStore/bcsstk17/bcsstk17.mtx"
#mat_path = "/home/scl/MStore/wathen100/wathen100.mtx"
#mat_path = "/home/scl/MStore/nasa2146/nasa2146.mtx"
#mat_path = "/home/scl/MStore/crystm01/crystm01.mtx"
#mat_path = "/home/scl/MStore/ex13/ex13.mtx"
#mat_path = "/home/scl/MStore/LFAT5/LFAT5.mtx"
#good
mat_path = "/home/scl/MStore/bodyy6/bodyy6.mtx"
#mat_path = "/home/scl/MStore/crystm02/crystm02.mtx"
block_size = 4
tol = 1e-12
maxiter = 800
    step_val = 64
linear_system_solver_worker_test = WorkerIterativeLinearSystemSolverCG_Exp_160531(mat_path)
linear_system_solver_worker_test.conduct_experiments(block_size,tol,maxiter, step_val)
#linear_system_solver_worker_test.chebyshev_poly_exp_a(0,6)
#linear_system_solver_worker_test.legendre_poly_exp_a(0,6)
#linear_system_solver_worker_test.debug_NativeConjugateGradient()
if __name__ == "__main__":
""" call main funtion for testing """
main()
| gpl-3.0 | 3,732,410,133,351,646,000 | 44.703333 | 138 | 0.594924 | false |
xjchensz/LSFS | LSFS/LSFS_TEST.py | 1 | 1803 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import scipy as sp
import os
import random
import time
import sys
def append_module_path():
import sys
paths = [ \
"../gen_data",
"../evaluate",
"../read_data"
]
for path in paths:
if path not in sys.path:
sys.path.append(path)
append_module_path()
import gen_data
import evaluate
import read_data
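# NOTE: compute_H, norm_2_1 and compute_Q used by the tests below are assumed to be
# provided by the LSFS module under test; they are not defined or imported in this file.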
def test_H():
"""
expected
array([[ 0.66666667, -0.33333333, -0.33333333],
[-0.33333333, 0.66666667, -0.33333333],
[-0.33333333, -0.33333333, 0.66666667]])
"""
return compute_H(3)
def test_norm_2_1():
"""
expected 4.2426406871192857
"""
W = np.array([[1,1],[2,2]])
return norm_2_1(W)
def test_Q():
"""
(np.sqrt(2) + np.sqrt(8)) / [np.sqrt(2), np.sqrt(8)]
expected [[ 3. , 0. ],
[ 0. , 1.5]]
"""
W = np.array([[1,1],[2,2]])
return compute_Q(W)
def print_W(W):
with open("W.txt", "a+") as f:
for w in W:
print(w, file=f)
print("\n========================\n", file=f)
def run_accuracy(fun, XL_train,YL_train,XU_train,YU_train, sel_num=5, output_file_name="feature_order"):
XL, YL, XU, YU = XL_train.copy(), YL_train.copy(), XU_train.copy(), YU_train.copy()
if fun.__name__.lower() == "lsfs":
YL = read_data.label_n1_to_nc(YL)
YU = read_data.label_n1_to_nc(YU)
feature_order, time_dual = fun(XL, YL, XU, output_file_name=output_file_name)
X,Y = evaluate.select_data(XL_train, YL_train, XU_train, YU_train,\
feature_order, sel_num=sel_num)
a = evaluate.run_acc(X,Y)
print("accuracy", ":", a)
return feature_order, time_dual, a | gpl-3.0 | -6,408,282,040,506,266,000 | 21 | 104 | 0.533555 | false |